code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
"""
parser.http.searchCompanyParser module (imdb package).
This module provides the HTMLSearchCompanyParser class (and the
search_company_parser instance), used to parse the results of a search
for a given company.
E.g., when searching for the name "Columbia Pictures", the parsed page would be:
http://akas.imdb.com/find?s=co;mx=20;q=Columbia+Pictures
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_company_name, build_company_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCompanyParser(DOMBasicMovieParser):
    """Simply get the name of a company and the imdbID.

    It's used by the DOMHTMLSearchCompanyParser class to return a result
    for a direct match (when a search on IMDb results in a single
    company, the web server sends directly the company page.
    """
    # Interpret the page title as a company name, not a movie title.
    def _titleFunct(self, x):
        return analyze_company_name(x or u'')
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
    """Parse the html page of results for a company search; every
    result row is reduced to the company's imdbID (taken from the
    link) and its analyzed name."""
    _BaseParser = DOMBasicCompanyParser
    _notDirectHitTitle = '<title>imdb company'
    _titleBuilder = lambda self, x: build_company_name(x)
    _linkPrefix = '/company/co'

    _attrs = [Attribute(key='data',
                multi=True,
                path={
                    'link': "./a[1]/@href",
                    'name': "./a[1]/text()",
                    'notes': "./text()[1]"
                },
                # Guard 'name' with `or u''`: x.get('name') can be None and
                # None + u'' would raise a TypeError; the person and movie
                # search parsers already guard the same way.
                postprocess=lambda x: (
                    analyze_imdbid(x.get('link')),
                    analyze_company_name((x.get('name') or u'') +
                                         (x.get('notes') or u''),
                                         stripNotes=True)
                ))]

    extractors = [Extractor(label='search',
                            path="//td[3]/a[starts-with(@href, " \
                                    "'/company/co')]/..",
                            attrs=_attrs)]
# Registry mapping an exported parser name to its (classes, init keyword
# arguments); used by the package machinery to build the parser instances.
_OBJECTS = {
    'search_company_parser': ((DOMHTMLSearchCompanyParser,),
                {'kind': 'company', '_basic_parser': DOMBasicCompanyParser})
}
| Python |
"""
parser.http.searchPersonParser module (imdb package).
This module provides the HTMLSearchPersonParser class (and the
search_person_parser instance), used to parse the results of a search
for a given person.
E.g., when searching for the name "Mel Gibson", the parsed page would be:
http://akas.imdb.com/find?q=Mel+Gibson&nm=on&mx=20
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
def _cleanName(n):
"""Clean the name in a title tag."""
if not n:
return u''
n = n.replace('Filmography by type for', '') # FIXME: temporary.
return n
class DOMBasicPersonParser(DOMBasicMovieParser):
    """Simply get the name of a person and the imdbID.

    It's used by the DOMHTMLSearchPersonParser class to return a result
    for a direct match (when a search on IMDb results in a single
    person, the web server sends directly the movie page."""
    # Interpret the (cleaned) page title as a canonical person name.
    def _titleFunct(self, x):
        return analyze_name(_cleanName(x), canonical=1)
# Matches 'aka'/'birth name' annotations like: aka <em>"Some Name"</em>;
# used by DOMHTMLSearchPersonParser.preprocess_string to wrap alternate
# names in a recognizable div, so the extractor can collect them.
_reAKASp = re.compile(r'(?:aka|birth name) (<em>")(.*?)"(<br>|<\/em>|<\/td>)',
                      re.I | re.M)
class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, for persons."""
    _BaseParser = DOMBasicPersonParser
    _notDirectHitTitle = '<title>imdb name'
    _titleBuilder = lambda self, x: build_name(x, canonical=True)
    _linkPrefix = '/name/nm'

    _attrs = [Attribute(key='data',
                multi=True,
                path={
                    'link': "./a[1]/@href",
                    'name': "./a[1]/text()",
                    'index': "./text()[1]",
                    # AKA names, wrapped in a div by preprocess_string below.
                    'akas': ".//div[@class='_imdbpyAKA']/text()"
                },
                # Build (imdbID, analyzed name dict, akas) for each result.
                postprocess=lambda x: (
                    analyze_imdbid(x.get('link') or u''),
                    analyze_name((x.get('name') or u'') + \
                                (x.get('index') or u''),
                                canonical=1), x.get('akas')
                ))]

    extractors = [Extractor(label='search',
                            path="//td[3]/a[starts-with(@href, '/name/nm')]/..",
                            attrs=_attrs)]

    def preprocess_string(self, html_string):
        # Only rewrite AKA markers on a list-of-results page (not on a
        # direct hit); wrap every AKA in a div so the XPath above finds it.
        if self._notDirectHitTitle in html_string[:1024].lower():
            html_string = _reAKASp.sub(
                                r'\1<div class="_imdbpyAKA">\2::</div>\3',
                                html_string)
        return DOMHTMLSearchMovieParser.preprocess_string(self, html_string)
# Registry mapping an exported parser name to its (classes, init keyword
# arguments); used by the package machinery to build the parser instances.
_OBJECTS = {
    'search_person_parser': ((DOMHTMLSearchPersonParser,),
                {'kind': 'person', '_basic_parser': DOMBasicPersonParser})
}
| Python |
"""
parser.http.utils module (imdb package).
This module provides miscellaneous utilities used by
the imdb.parser.http classes.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import logging
from imdb._exceptions import IMDbError
from imdb.utils import flatten, _Container
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.Character import Character
# Year, imdbIndex and kind.
# Matches the '(YYYY[/ROMAN])' part of a long imdb title, optionally
# followed by a kind marker like (mini), (TV), (V) or (VG).
re_yearKind_index = re.compile(r'(\([0-9\?]{4}(?:/[IVXLCDM]+)?\)(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)')

# Match imdb ids in href tags
re_imdbid = re.compile(r'(title/tt|name/nm|character/ch|company/co)([0-9]+)')
def analyze_imdbid(href):
    """Return the imdbID (as a string of digits) extracted from the
    given URL, or None when nothing matches."""
    if not href:
        return None
    found = re_imdbid.search(href)
    return str(found.group(2)) if found else None
_modify_keys = list(Movie.keys_tomodify_list) + list(Person.keys_tomodify_list)
def _putRefs(d, re_titles, re_names, re_characters, lastKey=None):
    """Iterate over the strings inside list items or dictionary values,
    substitutes movie titles and person names with the (qv) references.

    d -- the (nested) list or dict to modify in place;
    re_titles/re_names/re_characters -- compiled regexps (or None) that
        match the known references;
    lastKey -- the dictionary key the current value belongs to; only
        values under keys listed in _modify_keys are rewritten.
    """
    if isinstance(d, list):
        for i in xrange(len(d)):
            if isinstance(d[i], (unicode, str)):
                if lastKey in _modify_keys:
                    # Names as 'Name' (qv), titles as _Title_ (qv) and
                    # characters as #Character# (qv).
                    if re_names:
                        d[i] = re_names.sub(ur"'\1' (qv)", d[i])
                    if re_titles:
                        d[i] = re_titles.sub(ur'_\1_ (qv)', d[i])
                    if re_characters:
                        d[i] = re_characters.sub(ur'#\1# (qv)', d[i])
            elif isinstance(d[i], (list, dict)):
                # Recurse into nested containers, keeping the owning key.
                _putRefs(d[i], re_titles, re_names, re_characters,
                        lastKey=lastKey)
    elif isinstance(d, dict):
        for k, v in d.items():
            lastKey = k
            if isinstance(v, (unicode, str)):
                if lastKey in _modify_keys:
                    if re_names:
                        d[k] = re_names.sub(ur"'\1' (qv)", v)
                    if re_titles:
                        d[k] = re_titles.sub(ur'_\1_ (qv)', v)
                    if re_characters:
                        d[k] = re_characters.sub(ur'#\1# (qv)', v)
            elif isinstance(v, (list, dict)):
                _putRefs(d[k], re_titles, re_names, re_characters,
                        lastKey=lastKey)
# Handle HTML/XML/SGML entities.
from htmlentitydefs import entitydefs
entitydefs = entitydefs.copy()
entitydefsget = entitydefs.get
entitydefs['nbsp'] = ' '

# The few entities that must NOT be replaced (they'd break the markup).
sgmlentity = {'lt': '<', 'gt': '>', 'amp': '&', 'quot': '"', 'apos': '\''}
sgmlentityget = sgmlentity.get
_sgmlentkeys = sgmlentity.keys()

# Map both entity names and their decimal character references ('#NNN')
# to the corresponding unicode character.
entcharrefs = {}
entcharrefsget = entcharrefs.get
for _k, _v in entitydefs.items():
    if _k in _sgmlentkeys: continue
    if _v[0:2] == '&#':
        # The value is itself a numeric reference like '&#NNN;'.
        dec_code = _v[1:-1]
        _v = unichr(int(_v[2:-1]))
        entcharrefs[dec_code] = _v
    else:
        # The value is a raw (latin_1) character.
        dec_code = '#' + str(ord(_v))
        _v = unicode(_v, 'latin_1', 'replace')
        entcharrefs[dec_code] = _v
    entcharrefs[_k] = _v
del _sgmlentkeys, _k, _v
# Common non-breaking-space and quote spellings.
entcharrefs['#160'] = u' '
entcharrefs['#xA0'] = u' '
entcharrefs['#xa0'] = u' '
entcharrefs['#XA0'] = u' '
entcharrefs['#x22'] = u'"'
entcharrefs['#X22'] = u'"'
# convert &x26; to &, to make BeautifulSoup happy; beware that this
# leaves lone '&' in the html broken, but I assume this is better than
# the contrary...
# NOTE: the original code assigned entcharrefs['#x26'] twice; the
# duplicate (identical) assignment was removed.
entcharrefs['#38'] = u'&'
entcharrefs['#x26'] = u'&'

# Matches any known entity/character reference, case-insensitively.
re_entcharrefs = re.compile('&(%s|\#160|\#\d{1,5}|\#x[0-9a-f]{1,4});' %
                            '|'.join(map(re.escape, entcharrefs)), re.I)
re_entcharrefssub = re_entcharrefs.sub
# Decimal references of the sgml entities, too.
sgmlentity.update(dict([('#34', u'"'), ('#38', u'&'),
                        ('#60', u'<'), ('#62', u'>'), ('#39', u"'")]))
re_sgmlref = re.compile('&(%s);' % '|'.join(map(re.escape, sgmlentity)))
re_sgmlrefsub = re_sgmlref.sub

# Matches XML-only single tags, like <br/> ; they are invalid in HTML,
# but widely used by IMDb web site. :-/
re_xmltags = re.compile('<([a-zA-Z]+)/>')
def _replXMLRef(match):
    """Replace the matched XML/HTML entities and references;
    replace everything except sgml entities like <, >, ..."""
    ref = match.group(1)
    value = entcharrefsget(ref)
    if value is None:
        if ref[0] == '#':
            ref_code = ref[1:]
            # Leave the five sgml entities (", &, <, > and ')
            # untouched, returning the whole '&#NN;' match.
            if ref_code in ('34', '38', '60', '62', '39'):
                return match.group(0)
            elif ref_code[0].lower() == 'x':
                #if ref[2:] == '26':
                #    # Don't convert &x26; to &, to make BeautifulSoup happy.
                #    return '&'
                # Hexadecimal character reference.
                return unichr(int(ref[2:], 16))
            else:
                # Decimal character reference.
                return unichr(int(ref[1:]))
        else:
            # Unknown named entity: leave the name as-is.
            return ref
    return value
def subXMLRefs(s):
    """Return the given html string with entity and char references
    replaced by the characters they stand for."""
    return re_entcharrefs.sub(_replXMLRef, s)
# XXX: no more used here; move it to mobile (they are imported by helpers, too)?
def _replSGMLRefs(match):
    """Replace the matched SGML entity with its character, leaving
    unknown entity names untouched."""
    name = match.group(1)
    return sgmlentity.get(name, name)
def subSGMLRefs(s):
    """Return the given html string with sgml entity and char references
    replaced by the characters they stand for."""
    return re_sgmlref.sub(_replSGMLRefs, s)
_b_p_logger = logging.getLogger('imdbpy.parser.http.build_person')
def build_person(txt, personID=None, billingPos=None,
                roleID=None, accessSystem='http', modFunct=None):
    """Return a Person instance from the typical <tr>...</tr> strings
    found in the IMDb's web site.

    txt -- the text to parse: a name, optionally followed by a
           '....'/'...' separator and the role/notes;
    personID -- the imdbID of the person, when already known;
    billingPos -- position in the billing order;
    roleID -- a characterID, or a list of them, for the played role(s);
    accessSystem, modFunct -- forwarded to the Person instance.
    """
    notes = u''
    role = u''
    # Search the (optional) separator between name and role/notes.
    if txt.find('....') != -1:
        sep = '....'
    elif txt.find('...') != -1:
        sep = '...'
    else:
        sep = '...'
        # Replace the first parenthesis, assuming there are only
        # notes, after.
        # Rationale: no imdbIndex is (ever?) showed on the web site.
        txt = txt.replace('(', '...(', 1)
    txt_split = txt.split(sep, 1)
    name = txt_split[0].strip()
    if len(txt_split) == 2:
        role_comment = txt_split[1].strip()
        # Strip common endings.
        if role_comment[-4:] == ' and':
            role_comment = role_comment[:-4].rstrip()
        elif role_comment[-2:] == ' &':
            role_comment = role_comment[:-2].rstrip()
        elif role_comment[-6:] == '& ....':
            role_comment = role_comment[:-6].rstrip()
        # Get the notes.
        if roleID is not None:
            if not isinstance(roleID, list):
                # A single role: everything from the first parenthesis
                # on is considered notes.
                cmt_idx = role_comment.find('(')
                if cmt_idx != -1:
                    role = role_comment[:cmt_idx].rstrip()
                    notes = role_comment[cmt_idx:]
                else:
                    # Just a role, without notes.
                    role = role_comment
            else:
                role = role_comment
        else:
            # We're managing something that doesn't have a 'role', so
            # everything are notes.
            notes = role_comment
    if role == '....': role = u''
    roleNotes = []
    # Manages multiple roleIDs.
    if isinstance(roleID, list):
        rolesplit = role.split('/')
        role = []
        for r in rolesplit:
            # Split every role into its name and its '(notes)' part.
            nidx = r.find('(')
            if nidx != -1:
                role.append(r[:nidx].rstrip())
                roleNotes.append(r[nidx:])
            else:
                role.append(r)
                roleNotes.append(None)
        lr = len(role)
        lrid = len(roleID)
        if lr > lrid:
            # Pad roleID with None up to the number of roles.
            # BUGFIX: the original used (lrid - lr), which is negative
            # here and so appended nothing at all.
            roleID += [None] * (lr - lrid)
        elif lr < lrid:
            roleID = roleID[:lr]
        for i, rid in enumerate(roleID):
            if rid is not None:
                roleID[i] = str(rid)
        if lr == 1:
            role = role[0]
            roleID = roleID[0]
    elif roleID is not None:
        roleID = str(roleID)
    if personID is not None:
        personID = str(personID)
    if (not name) or (personID is None):
        # Set to 'debug', since build_person is expected to receive some crap.
        _b_p_logger.debug('empty name or personID for "%s"', txt)
    # XXX: return None if something strange is detected?
    person = Person(name=name, personID=personID, currentRole=role,
                    roleID=roleID, notes=notes, billingPos=billingPos,
                    modFunct=modFunct, accessSystem=accessSystem)
    # Attach per-role notes to the Character objects, when available.
    if roleNotes and len(roleNotes) == len(roleID):
        for idx, role in enumerate(person.currentRole):
            if roleNotes[idx]:
                role.notes = roleNotes[idx]
    return person
# Matches a characterID: exactly seven digits.
_re_chrIDs = re.compile('[0-9]{7}')

# Logger used by build_movie() to report malformed input.
_b_m_logger = logging.getLogger('imdbpy.parser.http.build_movie')
# To shrink spaces.
re_spaces = re.compile(r'\s+')
def build_movie(txt, movieID=None, roleID=None, status=None,
                accessSystem='http', modFunct=None, _parsingCharacter=False,
                _parsingCompany=False, year=None, chrRoles=None,
                rolesNoChar=None, additionalNotes=None):
    """Given a string as normally seen on the "categorized" page of
    a person on the IMDb's web site, returns a Movie instance.

    txt -- the text to parse: a title, optionally followed by a
           separator and the role/notes;
    movieID -- the imdbID of the movie, when already known;
    roleID -- characterID(s) for the played role(s);
    status -- production status, stored as-is in the Movie;
    _parsingCharacter, _parsingCompany -- select the role separator;
    year -- the year of the movie, when already known;
    chrRoles -- '@@'-separated character names (new-design pages);
    rolesNoChar -- '/'-separated roles without a character link;
    additionalNotes -- extra notes to append.
    """
    # FIXME: Oook, lets face it: build_movie and build_person are now
    # two horrible sets of patches to support the new IMDb design. They
    # must be rewritten from scratch.
    if _parsingCharacter:
        _defSep = ' Played by '
    elif _parsingCompany:
        _defSep = ' ... '
    else:
        _defSep = ' .... '
    title = re_spaces.sub(' ', txt).strip()
    # Split the role/notes from the movie title.
    tsplit = title.split(_defSep, 1)
    role = u''
    notes = u''
    roleNotes = []
    if len(tsplit) == 2:
        title = tsplit[0].rstrip()
        role = tsplit[1].lstrip()
    if title[-9:] == 'TV Series':
        title = title[:-9].rstrip()
    elif title[-14:] == 'TV mini-series':
        title = title[:-14] + ' (mini)'
    # Try to understand where the movie title ends.
    while True:
        if year:
            break
        if title[-1:] != ')':
            # Ignore the silly "TV Series" notice.
            if title[-9:] == 'TV Series':
                title = title[:-9].rstrip()
                continue
            else:
                # Just a title: stop here.
                break
        # Try to match paired parentheses; yes: sometimes there are
        # parentheses inside comments...
        nidx = title.rfind('(')
        while (nidx != -1 and \
                title[nidx:].count('(') != title[nidx:].count(')')):
            nidx = title[:nidx].rfind('(')
        # Unbalanced parentheses: stop here.
        if nidx == -1: break
        # The last item in parentheses seems to be a year: stop here.
        first4 = title[nidx+1:nidx+5]
        if (first4.isdigit() or first4 == '????') and \
                title[nidx+5:nidx+6] in (')', '/'): break
        # The last item in parentheses is a known kind: stop here.
        if title[nidx+1:-1] in ('TV', 'V', 'mini', 'VG'): break
        # Else, in parentheses there are some notes.
        # XXX: should the notes in the role half be kept separated
        #      from the notes in the movie title half?
        if notes: notes = '%s %s' % (title[nidx:], notes)
        else: notes = title[nidx:]
        title = title[:nidx].rstrip()
    if year:
        year = year.strip()
        if title[-1] == ')':
            # Strip a trailing parenthesized comment into the notes,
            # before appending the known year.
            fpIdx = title.rfind('(')
            if fpIdx != -1:
                if notes: notes = '%s %s' % (title[fpIdx:], notes)
                else: notes = title[fpIdx:]
                title = title[:fpIdx].rstrip()
        title = u'%s (%s)' % (title, year)
    if _parsingCharacter and roleID and not role:
        roleID = None
    if not roleID:
        roleID = None
    elif len(roleID) == 1:
        roleID = roleID[0]
    if not role and chrRoles and isinstance(roleID, (str, unicode)):
        # New-design pages: character IDs and names come pre-packed.
        roleID = _re_chrIDs.findall(roleID)
        role = ' / '.join(filter(None, chrRoles.split('@@')))
    # Manages multiple roleIDs.
    if isinstance(roleID, list):
        tmprole = role.split('/')
        role = []
        for r in tmprole:
            # Split every role into its name and its '(notes)' part.
            nidx = r.find('(')
            if nidx != -1:
                role.append(r[:nidx].rstrip())
                roleNotes.append(r[nidx:])
            else:
                role.append(r)
                roleNotes.append(None)
        lr = len(role)
        lrid = len(roleID)
        if lr > lrid:
            # Pad roleID with None up to the number of roles.
            # BUGFIX: the original used (lrid - lr), which is negative
            # here and so appended nothing at all.
            roleID += [None] * (lr - lrid)
        elif lr < lrid:
            roleID = roleID[:lr]
        for i, rid in enumerate(roleID):
            if rid is not None:
                roleID[i] = str(rid)
        if lr == 1:
            role = role[0]
            roleID = roleID[0]
    elif roleID is not None:
        roleID = str(roleID)
    if movieID is not None:
        movieID = str(movieID)
    if (not title) or (movieID is None):
        _b_m_logger.error('empty title or movieID for "%s"', txt)
    if rolesNoChar:
        rolesNoChar = filter(None, [x.strip() for x in rolesNoChar.split('/')])
        if not role:
            role = []
        elif not isinstance(role, list):
            role = [role]
        role += rolesNoChar
    notes = notes.strip()
    if additionalNotes:
        additionalNotes = re_spaces.sub(' ', additionalNotes).strip()
        if notes:
            notes += u' '
        notes += additionalNotes
    m = Movie(title=title, movieID=movieID, notes=notes, currentRole=role,
              roleID=roleID, roleIsPerson=_parsingCharacter,
              modFunct=modFunct, accessSystem=accessSystem)
    # Attach per-role notes to the Character objects, when available.
    if roleNotes and len(roleNotes) == len(roleID):
        for idx, role in enumerate(m.currentRole):
            try:
                if roleNotes[idx]:
                    role.notes = roleNotes[idx]
            except IndexError:
                break
    # Status can't be checked here, and must be detected by the parser.
    if status:
        m['status'] = status
    return m
class DOMParserBase(object):
    """Base parser to handle HTML data from the IMDb's web server."""
    # If True, references to movies/persons/characters are gathered.
    _defGetRefs = False
    # If True, the parsed data contains _Container objects whose
    # accessSystem/modFunct parameters must be set after parsing.
    _containsObjects = False

    # List of (pattern-or-string-or-callable, replacement) pairs applied
    # to the html string by preprocess_string().
    preprocessors = []
    # List of Extractor objects applied by parse_dom().
    extractors = []
    # Name of the DOM module in use ('lxml' or 'beautifulsoup').
    usingModule = None

    _logger = logging.getLogger('imdbpy.parser.http.domparser')

    def __init__(self, useModule=None):
        """Initialize the parser. useModule can be used to force it
        to use 'BeautifulSoup' or 'lxml'; by default, it's auto-detected,
        using 'lxml' if available and falling back to 'BeautifulSoup'
        otherwise."""
        # Module to use.
        if useModule is None:
            useModule = ('lxml', 'BeautifulSoup')
        if not isinstance(useModule, (tuple, list)):
            useModule = [useModule]
        self._useModule = useModule
        nrMods = len(useModule)
        _gotError = False
        for idx, mod in enumerate(useModule):
            mod = mod.strip().lower()
            try:
                if mod == 'lxml':
                    from lxml.html import fromstring
                    from lxml.etree import tostring
                    self._is_xml_unicode = False
                    self.usingModule = 'lxml'
                elif mod == 'beautifulsoup':
                    from bsouplxml.html import fromstring
                    from bsouplxml.etree import tostring
                    self._is_xml_unicode = True
                    self.usingModule = 'beautifulsoup'
                else:
                    self._logger.warn('unknown module "%s"' % mod)
                    continue
                self.fromstring = fromstring
                self._tostring = tostring
                if _gotError:
                    self._logger.warn('falling back to "%s"' % mod)
                break
            except ImportError, e:
                if idx+1 >= nrMods:
                    # Raise the exception, if we don't have any more
                    # options to try.
                    raise IMDbError, 'unable to use any parser in %s: %s' % \
                            (str(useModule), str(e))
                else:
                    self._logger.warn('unable to use "%s": %s' % (mod, str(e)))
                    _gotError = True
                continue
        else:
            # The for loop exhausted all candidates without a 'break'.
            raise IMDbError, 'unable to use parsers in %s' % str(useModule)
        # Fall-back defaults.
        self._modFunct = None
        self._as = 'http'
        self._cname = self.__class__.__name__
        self._init()
        self.reset()

    def reset(self):
        """Reset the parser."""
        # Names and titles references.
        self._namesRefs = {}
        self._titlesRefs = {}
        self._charactersRefs = {}
        self._reset()

    def _init(self):
        """Subclasses can override this method, if needed."""
        pass

    def _reset(self):
        """Subclasses can override this method, if needed."""
        pass

    def parse(self, html_string, getRefs=None, **kwds):
        """Return the dictionary generated from the given html string;
        getRefs can be used to force the gathering of movies/persons/characters
        references."""
        self.reset()
        if getRefs is not None:
            self.getRefs = getRefs
        else:
            self.getRefs = self._defGetRefs
        # Useful only for the testsuite.
        if not isinstance(html_string, unicode):
            html_string = unicode(html_string, 'latin_1', 'replace')
        html_string = subXMLRefs(html_string)
        # Temporary fix: self.parse_dom must work even for empty strings.
        html_string = self.preprocess_string(html_string)
        html_string = html_string.strip()
        # tag attributes like title=""Family Guy"" will be
        # converted to title=""Family Guy"" and this confuses BeautifulSoup.
        if self.usingModule == 'beautifulsoup':
            html_string = html_string.replace('""', '"')
        #print html_string.encode('utf8')
        if html_string:
            dom = self.get_dom(html_string)
            #print self.tostring(dom).encode('utf8')
            try:
                dom = self.preprocess_dom(dom)
            except Exception, e:
                self._logger.error('%s: caught exception preprocessing DOM',
                                    self._cname, exc_info=True)
            if self.getRefs:
                try:
                    self.gather_refs(dom)
                except Exception, e:
                    self._logger.warn('%s: unable to gather refs: %s',
                                    self._cname, exc_info=True)
            data = self.parse_dom(dom)
        else:
            data = {}
        try:
            data = self.postprocess_data(data)
        except Exception, e:
            self._logger.error('%s: caught exception postprocessing data',
                                self._cname, exc_info=True)
        if self._containsObjects:
            self.set_objects_params(data)
        data = self.add_refs(data)
        return data

    def _build_empty_dom(self):
        # Fallback DOM used when the html string can't be parsed.
        from bsouplxml import _bsoup
        return _bsoup.BeautifulSoup('')

    def get_dom(self, html_string):
        """Return a dom object, from the given string."""
        try:
            dom = self.fromstring(html_string)
            if dom is None:
                dom = self._build_empty_dom()
                self._logger.error('%s: using a fake empty DOM', self._cname)
            return dom
        except Exception, e:
            self._logger.error('%s: caught exception parsing DOM',
                                self._cname, exc_info=True)
            return self._build_empty_dom()

    def xpath(self, element, path):
        """Return elements matching the given XPath."""
        try:
            xpath_result = element.xpath(path)
            if self._is_xml_unicode:
                return xpath_result
            # lxml can return plain str objects: normalize to unicode.
            result = []
            for item in xpath_result:
                if isinstance(item, str):
                    item = unicode(item)
                result.append(item)
            return result
        except Exception, e:
            self._logger.error('%s: caught exception extracting XPath "%s"',
                                self._cname, path, exc_info=True)
            return []

    def tostring(self, element):
        """Convert the element to a string."""
        if isinstance(element, (unicode, str)):
            return unicode(element)
        else:
            try:
                return self._tostring(element, encoding=unicode)
            except Exception, e:
                self._logger.error('%s: unable to convert to string',
                                    self._cname, exc_info=True)
                return u''

    def clone(self, element):
        """Clone an element."""
        return self.fromstring(self.tostring(element))

    def preprocess_string(self, html_string):
        """Here we can modify the text, before it's parsed."""
        if not html_string:
            return html_string
        # Remove silly » chars.
        html_string = html_string.replace(u' \xbb', u'')
        try:
            preprocessors = self.preprocessors
        except AttributeError:
            return html_string
        for src, sub in preprocessors:
            # re._pattern_type is present only since Python 2.5.
            if callable(getattr(src, 'sub', None)):
                # A compiled regular expression.
                html_string = src.sub(sub, html_string)
            elif isinstance(src, str):
                # A plain string to replace.
                html_string = html_string.replace(src, sub)
            elif callable(src):
                # A function taking and returning the whole html string.
                try:
                    html_string = src(html_string)
                except Exception, e:
                    _msg = '%s: caught exception preprocessing html'
                    self._logger.error(_msg, self._cname, exc_info=True)
                    continue
        ##print html_string.encode('utf8')
        return html_string

    def gather_refs(self, dom):
        """Collect references."""
        grParser = GatherRefs(useModule=self._useModule)
        grParser._as = self._as
        grParser._modFunct = self._modFunct
        refs = grParser.parse_dom(dom)
        refs = grParser.postprocess_data(refs)
        self._namesRefs = refs['names refs']
        self._titlesRefs = refs['titles refs']
        self._charactersRefs = refs['characters refs']

    def preprocess_dom(self, dom):
        """Last chance to modify the dom, before the rules in self.extractors
        are applied by the parse_dom method."""
        return dom

    def parse_dom(self, dom):
        """Parse the given dom according to the rules specified
        in self.extractors."""
        result = {}
        for extractor in self.extractors:
            ##print extractor.label
            if extractor.group is None:
                # Ungrouped: every matched element is keyed by the label.
                elements = [(extractor.label, element)
                            for element in self.xpath(dom, extractor.path)]
            else:
                # Grouped: the key of each element comes from its group.
                groups = self.xpath(dom, extractor.group)
                elements = []
                for group in groups:
                    group_key = self.xpath(group, extractor.group_key)
                    if not group_key: continue
                    group_key = group_key[0]
                    # XXX: always tries the conversion to unicode:
                    #      BeautifulSoup.NavigableString is a subclass
                    #      of unicode, and so it's never converted.
                    group_key = self.tostring(group_key)
                    normalizer = extractor.group_key_normalize
                    if normalizer is not None:
                        if callable(normalizer):
                            try:
                                group_key = normalizer(group_key)
                            except Exception, e:
                                _m = '%s: unable to apply group_key normalizer'
                                self._logger.error(_m, self._cname,
                                                    exc_info=True)
                    group_elements = self.xpath(group, extractor.path)
                    elements.extend([(group_key, element)
                                    for element in group_elements])
            for group_key, element in elements:
                for attr in extractor.attrs:
                    if isinstance(attr.path, dict):
                        # Multiple sub-paths: build a dict of field values.
                        data = {}
                        for field in attr.path.keys():
                            path = attr.path[field]
                            value = self.xpath(element, path)
                            if not value:
                                data[field] = None
                            else:
                                # XXX: use u'' , to join?
                                data[field] = ''.join(value)
                    else:
                        data = self.xpath(element, attr.path)
                        if not data:
                            data = None
                        else:
                            data = attr.joiner.join(data)
                    if not data:
                        continue
                    attr_postprocess = attr.postprocess
                    if callable(attr_postprocess):
                        try:
                            data = attr_postprocess(data)
                        except Exception, e:
                            _m = '%s: unable to apply attr postprocess'
                            self._logger.error(_m, self._cname, exc_info=True)
                    key = attr.key
                    if key is None:
                        key = group_key
                    elif key.startswith('.'):
                        # assuming this is an xpath
                        try:
                            key = self.xpath(element, key)[0]
                        except IndexError:
                            self._logger.error('%s: XPath returned no items',
                                                self._cname, exc_info=True)
                    elif key.startswith('self.'):
                        # The key is the value of an instance attribute.
                        key = getattr(self, key[5:])
                    if attr.multi:
                        # Accumulate every value under the same key.
                        if key not in result:
                            result[key] = []
                        result[key].append(data)
                    else:
                        if isinstance(data, dict):
                            result.update(data)
                        else:
                            result[key] = data
        return result

    def postprocess_data(self, data):
        """Here we can modify the data."""
        return data

    def set_objects_params(self, data):
        """Set parameters of Movie/Person/... instances, since they are
        not always set in the parser's code."""
        for obj in flatten(data, yieldDictKeys=True, scalar=_Container):
            obj.accessSystem = self._as
            obj.modFunct = self._modFunct

    def add_refs(self, data):
        """Modify data according to the expected output."""
        if self.getRefs:
            # Build one big alternation regexp for every kind of reference;
            # an empty group '()' means there are no references of that kind.
            titl_re = ur'(%s)' % '|'.join([re.escape(x) for x
                                            in self._titlesRefs.keys()])
            if titl_re != ur'()': re_titles = re.compile(titl_re, re.U)
            else: re_titles = None
            nam_re = ur'(%s)' % '|'.join([re.escape(x) for x
                                            in self._namesRefs.keys()])
            if nam_re != ur'()': re_names = re.compile(nam_re, re.U)
            else: re_names = None
            chr_re = ur'(%s)' % '|'.join([re.escape(x) for x
                                            in self._charactersRefs.keys()])
            if chr_re != ur'()': re_characters = re.compile(chr_re, re.U)
            else: re_characters = None
            _putRefs(data, re_titles, re_names, re_characters)
        return {'data': data, 'titlesRefs': self._titlesRefs,
                'namesRefs': self._namesRefs,
                'charactersRefs': self._charactersRefs}
class Extractor(object):
    """Instruct the DOM parser about how to parse a document."""
    def __init__(self, label, path, attrs, group=None, group_key=None,
                 group_key_normalize=None):
        """Initialize an Extractor object, used to instruct the DOM parser
        about how to parse a document."""
        # The label is rarely (never?) used, mostly for debugging purposes.
        self.label = label
        self.group = group
        # Default key: all the text under the grouping node.
        self.group_key = group_key if group_key is not None else ".//text()"
        self.group_key_normalize = group_key_normalize
        self.path = path
        # Always store a list of Attribute objects to fetch.
        self.attrs = [attrs] if isinstance(attrs, Attribute) else attrs

    def __repr__(self):
        """String representation of an Extractor object."""
        return '<Extractor id:%s (label=%s, path=%s, attrs=%s, group=%s, ' \
               'group_key=%s group_key_normalize=%s)>' % (
                    id(self), self.label, self.path, repr(self.attrs),
                    self.group, self.group_key, self.group_key_normalize)
class Attribute(object):
    """The attribute to consider, for a given node."""
    def __init__(self, key, multi=False, path=None, joiner=None,
                 postprocess=None):
        """Initialize an Attribute object, used to specify the
        attribute to consider, for a given node."""
        # The key under which information will be saved; can be a string or an
        # XPath. If None, the label of the containing extractor will be used.
        self.key = key
        self.multi = multi
        self.path = path
        # An empty joiner means plain concatenation of the values.
        self.joiner = joiner if joiner is not None else ''
        # Post-process this set of information.
        self.postprocess = postprocess

    def __repr__(self):
        """String representation of an Attribute object."""
        return '<Attribute id:%s (key=%s, multi=%s, path=%s, joiner=%s, ' \
               'postprocess=%s)>' % (id(self), self.key, self.multi,
                                     repr(self.path), self.joiner,
                                     repr(self.postprocess))
def _parse_ref(text, link, info):
    """Manage links to references."""
    # For movie links, append the '(year/kind)' part found right after
    # the link, so the key matches the long imdb canonical title.
    if '/title/tt' in link:
        yearK = re_yearKind_index.match(info)
        if yearK and yearK.start() == 0:
            text = '%s %s' % (text, info[:yearK.end()])
    return (text.replace('\n', ' '), link)
class GatherRefs(DOMParserBase):
    """Parser used to gather references to movies, persons and characters."""
    # For every link, collect its text, the href and the text right after
    # the link (which, for titles, contains the '(year/kind)' part).
    _attrs = [Attribute(key=None, multi=True,
                        path={
                            'text': './text()',
                            'link': './@href',
                            'info': './following::text()[1]'
                        },
                        postprocess=lambda x: _parse_ref(x.get('text'), x.get('link'),
                                                        (x.get('info') or u'').strip()))]
    # The string-length() checks make sure only plain /name/nmNNNNNNN/,
    # /title/ttNNNNNNN/ and /character/chNNNNNNN/ links are gathered.
    extractors = [
        Extractor(label='names refs',
                    path="//a[starts-with(@href, '/name/nm')][string-length(@href)=16]",
                    attrs=_attrs),

        Extractor(label='titles refs',
                    path="//a[starts-with(@href, '/title/tt')]" \
                            "[string-length(@href)=17]",
                    attrs=_attrs),

        Extractor(label='characters refs',
                    path="//a[starts-with(@href, '/character/ch')]" \
                            "[string-length(@href)=21]",
                    attrs=_attrs),
        ]

    def postprocess_data(self, data):
        """Turn every gathered (text, link) pair into a Person, Movie or
        Character instance, keyed by the text of the link."""
        result = {}
        for item in ('names refs', 'titles refs', 'characters refs'):
            result[item] = {}
            for k, v in data.get(item, []):
                # Skip links not ending with '/': they are not canonical.
                if not v.endswith('/'): continue
                imdbID = analyze_imdbid(v)
                if item == 'names refs':
                    obj = Person(personID=imdbID, name=k,
                                accessSystem=self._as, modFunct=self._modFunct)
                elif item == 'titles refs':
                    obj = Movie(movieID=imdbID, title=k,
                                accessSystem=self._as, modFunct=self._modFunct)
                else:
                    obj = Character(characterID=imdbID, name=k,
                                accessSystem=self._as, modFunct=self._modFunct)
                # XXX: companies aren't handled: are they ever found in text,
                #      as links to their page?
                result[item][k] = obj
        return result

    def add_refs(self, data):
        # References must not be added to the references themselves.
        return data
| Python |
"""
imdb.parser.http._bsoup module (imdb.parser.http package).
This is the BeautifulSoup.py module, not modified; it's included here
so that it's not an external dependency.
Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2008, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.7a"
__copyright__ = "Copyright (c) 2004-2008 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
# (sgmllib's stock tagfind/declname regexes reject ':' in element names).
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match

# Encoding used whenever a node is rendered to a byte string
# (__str__ and friends default to this).
DEFAULT_OUTPUT_ENCODING = "utf-8"
# First, the classes that represent markup elements.
class PageElement:
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)"""

    def setup(self, parent=None, previous=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous = previous
        self.next = None
        self.previousSibling = None
        self.nextSibling = None
        if self.parent and self.parent.contents:
            self.previousSibling = self.parent.contents[-1]
            self.previousSibling.nextSibling = self

    def replaceWith(self, replaceWith):
        """Replaces this element in the tree with the given element,
        rewiring parent/sibling links."""
        oldParent = self.parent
        myIndex = self.parent.contents.index(self)
        if hasattr(replaceWith, 'parent') and replaceWith.parent == self.parent:
            # We're replacing this element with one of its siblings.
            index = self.parent.contents.index(replaceWith)
            if index and index < myIndex:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                myIndex = myIndex - 1
        self.extract()
        oldParent.insert(myIndex, replaceWith)

    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            try:
                self.parent.contents.remove(self)
            except ValueError:
                pass

        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        lastChild = self._lastRecursiveChild()
        nextElement = lastChild.next

        if self.previous:
            self.previous.next = nextElement
        if nextElement:
            nextElement.previous = self.previous
        self.previous = None
        lastChild.next = None

        self.parent = None
        if self.previousSibling:
            self.previousSibling.nextSibling = self.nextSibling
        if self.nextSibling:
            self.nextSibling.previousSibling = self.previousSibling
        self.previousSibling = self.nextSibling = None
        return self

    def _lastRecursiveChild(self):
        "Finds the last element beneath this object to be parsed."
        lastChild = self
        while hasattr(lastChild, 'contents') and lastChild.contents:
            lastChild = lastChild.contents[-1]
        return lastChild

    def insert(self, position, newChild):
        """Inserts newChild at the given position in this element's
        contents, rewiring next/previous/sibling links.  Plain strings
        are promoted to NavigableString."""
        if (isinstance(newChild, basestring)
            or isinstance(newChild, unicode)) \
            and not isinstance(newChild, NavigableString):
            newChild = NavigableString(newChild)

        position = min(position, len(self.contents))
        if hasattr(newChild, 'parent') and newChild.parent != None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if newChild.parent == self:
                # Bug fix: the original code called self.find(newChild),
                # which performs a tree *search* using newChild as a
                # match criterion; what is needed here is newChild's
                # positional index among this object's children (as the
                # comment below implies, and as fixed upstream).
                index = self.contents.index(newChild)
                if index and index < position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, the index of this
                    # element will jump down one.
                    position = position - 1
            newChild.extract()

        newChild.parent = self
        previousChild = None
        if position == 0:
            newChild.previousSibling = None
            newChild.previous = self
        else:
            previousChild = self.contents[position-1]
            newChild.previousSibling = previousChild
            newChild.previousSibling.nextSibling = newChild
            newChild.previous = previousChild._lastRecursiveChild()
        if newChild.previous:
            newChild.previous.next = newChild

        newChildsLastElement = newChild._lastRecursiveChild()

        if position >= len(self.contents):
            newChild.nextSibling = None

            # Walk up the tree to find the element that would follow
            # newChild in document order.
            parent = self
            parentsNextSibling = None
            while not parentsNextSibling:
                parentsNextSibling = parent.nextSibling
                parent = parent.parent
                if not parent: # This is the last element in the document.
                    break
            if parentsNextSibling:
                newChildsLastElement.next = parentsNextSibling
            else:
                newChildsLastElement.next = None
        else:
            nextChild = self.contents[position]
            newChild.nextSibling = nextChild
            if newChild.nextSibling:
                newChild.nextSibling.previousSibling = newChild
            newChildsLastElement.next = nextChild

        if newChildsLastElement.next:
            newChildsLastElement.next.previous = newChildsLastElement
        self.contents.insert(position, newChild)

    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)

    def findNext(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._findOne(self.findAllNext, name, attrs, text, **kwargs)

    def findAllNext(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.nextGenerator,
                             **kwargs)

    def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._findOne(self.findNextSiblings, name, attrs, text,
                             **kwargs)

    def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
                         **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.nextSiblingGenerator, **kwargs)
    fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x

    def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)

    def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._findAll(name, attrs, text, limit, self.previousGenerator,
                             **kwargs)
    fetchPrevious = findAllPrevious # Compatibility with pre-3.x

    def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._findOne(self.findPreviousSiblings, name, attrs, text,
                             **kwargs)

    def findPreviousSiblings(self, name=None, attrs={}, text=None,
                             limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._findAll(name, attrs, text, limit,
                             self.previousSiblingGenerator, **kwargs)
    fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x

    def findParent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _findOne because findParents takes a different
        # set of arguments.
        r = None
        l = self.findParents(name, attrs, 1)
        if l:
            r = l[0]
        return r

    def findParents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._findAll(name, attrs, None, limit, self.parentGenerator,
                             **kwargs)
    fetchParents = findParents # Compatibility with pre-3.x

    #These methods do the real heavy lifting.

    def _findOne(self, method, name, attrs, text, **kwargs):
        """Runs the given findAll-style method with a limit of 1 and
        returns the single result, or None."""
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r

    def _findAll(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        g = generator()
        while True:
            try:
                i = g.next()
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results

    #These Generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    def nextGenerator(self):
        i = self
        while i:
            i = i.next
            yield i

    def nextSiblingGenerator(self):
        i = self
        while i:
            i = i.nextSibling
            yield i

    def previousGenerator(self):
        i = self
        while i:
            i = i.previous
            yield i

    def previousSiblingGenerator(self):
        i = self
        while i:
            i = i.previousSibling
            yield i

    def parentGenerator(self):
        i = self
        while i:
            i = i.parent
            yield i

    # Utility methods
    def substituteEncoding(self, str, encoding=None):
        """Replaces the %SOUP-ENCODING% placeholder in str with the
        given encoding (defaulting to utf-8)."""
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)

    def toEncoding(self, s, encoding=None):
        """Encodes an object to a string in some encoding, or to Unicode.
        ."""
        if isinstance(s, unicode):
            if encoding:
                s = s.encode(encoding)
        elif isinstance(s, str):
            if encoding:
                s = s.encode(encoding)
            else:
                s = unicode(s)
        else:
            if encoding:
                s = self.toEncoding(str(s), encoding)
            else:
                s = unicode(s)
        return s
class NavigableString(unicode, PageElement):
    """A piece of text: a unicode string that also carries
    PageElement's tree-navigation members (parent, next, siblings)."""

    def __new__(cls, value):
        """Create a new NavigableString.

        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if isinstance(value, unicode):
            return unicode.__new__(cls, value)
        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)

    def __getnewargs__(self):
        # Pickle support: reconstruct through __new__ using the encoded
        # (DEFAULT_OUTPUT_ENCODING) byte form of this string.
        return (NavigableString.__str__(self),)

    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr == 'string':
            return self
        else:
            raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)

    def __unicode__(self):
        # Round-trips through DEFAULT_OUTPUT_ENCODING to return a plain
        # unicode object rather than this subclass.
        return str(self).decode(DEFAULT_OUTPUT_ENCODING)

    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        # With an encoding, returns an encoded byte string; with
        # encoding=None, returns self unchanged.
        if encoding:
            return self.encode(encoding)
        else:
            return self
class CData(NavigableString):
    """A CDATA section; renders wrapped in <![CDATA[...]]> delimiters."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<![CDATA[" + body + "]]>"
class ProcessingInstruction(NavigableString):
    """A processing instruction; renders wrapped in <?...?>, with the
    %SOUP-ENCODING% placeholder substituted if present."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        text = self
        if "%SOUP-ENCODING%" in text:
            text = self.substituteEncoding(text, encoding)
        return "<?" + self.toEncoding(text, encoding) + "?>"
class Comment(NavigableString):
    """A comment node; renders wrapped in <!--...--> delimiters."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<!--" + body + "-->"
class Declaration(NavigableString):
    """A declaration (e.g. a doctype); renders wrapped in <!...>."""
    def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        body = NavigableString.__str__(self, encoding)
        return "<!" + body + ">"
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isString(val):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
contents = [i for i in self.contents]
for i in contents:
if isinstance(i, Tag):
i.decompose()
else:
i.extract()
self.extract()
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string.."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap'):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
for i in range(0, len(self.contents)):
yield self.contents[i]
raise StopIteration
def recursiveChildGenerator(self):
stack = [(self, 0)]
while stack:
tag, start = stack.pop()
if isinstance(tag, Tag):
for i in range(start, len(tag.contents)):
a = tag.contents[i]
yield a
if isinstance(a, Tag) and tag.contents:
if i < len(tag.contents) - 1:
stack.append((tag, i+1))
stack.append((a, 0))
break
raise StopIteration
# Next, a couple classes to represent queries and their results.
class SoupStrainer:
    """Encapsulates a number of ways of matching a markup element (tag or
    text)."""

    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        # 'name', 'text' and each attrs value may be a string, a
        # regular expression object, a callable, a list, or True;
        # see _matches() for the matching rules.
        self.name = name
        if isString(attrs):
            # A bare string for 'attrs' is shorthand for the CSS class.
            kwargs['class'] = attrs
            attrs = None
        if kwargs:
            if attrs:
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text

    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)

    def searchTag(self, markupName=None, markupAttrs={}):
        """Matches this strainer against a tag.  markupName may be a Tag
        object, or a tag name accompanied by its attributes in
        markupAttrs.  Returns the matched object, or None."""
        found = None
        markup = None
        if isinstance(markupName, Tag):
            markup = markupName
            markupAttrs = markup
        callFunctionWithTagData = callable(self.name) \
                                and not isinstance(markupName, Tag)

        if (not self.name) \
               or callFunctionWithTagData \
               or (markup and self._matches(markup, self.name)) \
               or (not markup and self._matches(markupName, self.name)):
            if callFunctionWithTagData:
                match = self.name(markupName, markupAttrs)
            else:
                match = True
                markupAttrMap = None
                # Every attribute constraint must match.
                for attr, matchAgainst in self.attrs.items():
                    if not markupAttrMap:
                        if hasattr(markupAttrs, 'get'):
                            markupAttrMap = markupAttrs
                        else:
                            # Build a map from a list of (key, value) pairs.
                            markupAttrMap = {}
                            for k,v in markupAttrs:
                                markupAttrMap[k] = v
                    attrValue = markupAttrMap.get(attr)
                    if not self._matches(attrValue, matchAgainst):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markupName
        return found

    def search(self, markup):
        """Matches this strainer against a piece of markup: a Tag, a
        NavigableString/plain string, or a list of them.  Returns the
        matched element, or None."""
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if isList(markup) and not isinstance(markup, Tag):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.searchTag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isString(markup):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception, "I don't know how to match against a %s" \
                  % markup.__class__
        return found

    def _matches(self, markup, matchAgainst):
        """Core matching rule: matchAgainst may be True (anything
        non-None matches), a callable, a regexp object, a list, a map,
        or a string (compared for equality)."""
        result = False
        if matchAgainst == True and type(matchAgainst) == types.BooleanType:
            result = markup != None
        elif callable(matchAgainst):
            result = matchAgainst(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup and not isString(markup):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(matchAgainst, 'match'):
                # It's a regexp object.
                result = markup and matchAgainst.search(markup)
            elif isList(matchAgainst):
                result = markup in matchAgainst
            elif hasattr(matchAgainst, 'items'):
                # NOTE(review): 'markup' is a string (or None) at this
                # point, so markup.has_key looks wrong -- presumably
                # this meant matchAgainst.has_key(markup); confirm
                # against upstream before changing.
                result = markup.has_key(matchAgainst)
            elif matchAgainst and isString(markup):
                if isinstance(markup, unicode):
                    matchAgainst = unicode(matchAgainst)
                else:
                    matchAgainst = str(matchAgainst)

            if not result:
                result = matchAgainst == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Bug fix: the original called list.__init__([]), which
        # initializes a throwaway empty list instead of this instance;
        # initialize self (harmless for a fresh object, correct in
        # general).
        list.__init__(self)
        # The SoupStrainer that produced these results.
        self.source = source
# Now, some helper functions.
def isList(l):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is listlike: anything with
    an __iter__ method, or an actual list/tuple on old Pythons."""
    if hasattr(l, '__iter__'):
        return True
    return type(l) in (types.ListType, types.TupleType)
def isString(s):
    """Convenience method that works with all 2.x versions of Python
    to determine whether or not something is stringlike (falls back to
    a plain str check when unicode/basestring are undefined)."""
    try:
        return isinstance(s, (unicode, basestring))
    except NameError:
        return isinstance(s, str)
def buildTagMap(default, *args):
    """Turns a list of maps, lists, or scalars into a single map.
    Map entries keep their own values; list items and scalars are
    mapped to 'default'.  Used to build the SELF_CLOSING_TAGS,
    NESTABLE_TAGS, and NESTING_RESET_TAGS maps."""
    built = {}
    for portion in args:
        if hasattr(portion, 'items'):
            # A map: merge each of its entries.
            for k, v in portion.items():
                built[k] = v
        elif isList(portion):
            # A list: every item maps to the default.
            for k in portion:
                built[k] = default
        else:
            # A scalar: it maps to the default.
            built[portion] = default
    return built
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
    def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
                 markupMassage=True, smartQuotesTo=XML_ENTITIES,
                 convertEntities=None, selfClosingTags=None, isHTML=False):
        """The Soup object is initialized as the 'root tag', and the
        provided markup (which can be a string or a file-like object)
        is fed into the underlying parser.

        sgmllib will process most bad HTML, and the BeautifulSoup
        class has some tricks for dealing with some HTML that kills
        sgmllib, but Beautiful Soup can nonetheless choke or lose data
        if your data uses self-closing tags or declarations
        incorrectly.

        By default, Beautiful Soup uses regexes to sanitize input,
        avoiding the vast majority of these problems. If the problems
        don't apply to you, pass in False for markupMassage, and
        you'll get better performance.

        The default parser massage techniques fix the two most common
        instances of invalid HTML that choke sgmllib:

         <br/> (No space between name of closing tag and tag close)
         <! --Comment--> (Extraneous whitespace in declaration)

        You can pass in a custom list of (RE object, replace method)
        tuples to get Beautiful Soup to scrub your input the way you
        want."""
        self.parseOnlyThese = parseOnlyThese
        self.fromEncoding = fromEncoding
        self.smartQuotesTo = smartQuotesTo
        self.convertEntities = convertEntities
        # Set the rules for how we'll deal with the entities we
        # encounter
        if self.convertEntities:
            # It doesn't make sense to convert encoded characters to
            # entities even while you're converting entities to Unicode.
            # Just convert it all to Unicode.
            self.smartQuotesTo = None
            if convertEntities == self.HTML_ENTITIES:
                self.convertXMLEntities = False
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = True
            elif convertEntities == self.XHTML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = True
                self.escapeUnrecognizedEntities = False
            elif convertEntities == self.XML_ENTITIES:
                self.convertXMLEntities = True
                self.convertHTMLEntities = False
                self.escapeUnrecognizedEntities = False
        else:
            # No entity conversion at all.
            self.convertXMLEntities = False
            self.convertHTMLEntities = False
            self.escapeUnrecognizedEntities = False

        self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
        SGMLParser.__init__(self)

        if hasattr(markup, 'read'):        # It's a file-type object.
            markup = markup.read()
        self.markup = markup
        self.markupMassage = markupMassage
        try:
            self._feed(isHTML=isHTML)
        except StopParsing:
            # Raised by the handlers when parseOnlyThese decides the
            # interesting part of the document has been consumed.
            pass
        self.markup = None                 # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127 : # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
    def _feed(self, inDocumentEncoding=None, isHTML=False):
        """Converts self.markup to Unicode (via UnicodeDammit unless it
        already is Unicode), optionally applies the regex fixups in
        self.markupMassage, then runs the SGML parse and closes any
        tags still open at end of input."""
        # Convert the document to Unicode.
        markup = self.markup
        if isinstance(markup, unicode):
            if not hasattr(self, 'originalEncoding'):
                self.originalEncoding = None
        else:
            dammit = UnicodeDammit\
                     (markup, [self.fromEncoding, inDocumentEncoding],
                      smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
            markup = dammit.unicode
            self.originalEncoding = dammit.originalEncoding
            self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
        if markup:
            if self.markupMassage:
                # markupMassage=True means "use the default fixups".
                if not isList(self.markupMassage):
                    self.markupMassage = self.MARKUP_MASSAGE
                for fix, m in self.markupMassage:
                    markup = fix.sub(m, markup)
                # TODO: We get rid of markupMassage so that the
                # soup object can be deepcopied later on. Some
                # Python installations can't copy regexes. If anyone
                # was relying on the existence of markupMassage, this
                # might cause problems.
                del(self.markupMassage)
        self.reset()

        SGMLParser.feed(self, markup)
        # Close out any unfinished strings and close all the open tags.
        self.endData()
        while self.currentTag.name != self.ROOT_TAG_NAME:
            self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.find('start_') == 0 or methodName.find('end_') == 0 \
or methodName.find('do_') == 0:
return SGMLParser.__getattr__(self, methodName)
elif methodName.find('__') != 0:
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
    def reset(self):
        """Resets parser state and re-initializes this object as the
        (hidden) root '[document]' tag, ready for a fresh feed."""
        # This object serves as both the parser and the root Tag.
        Tag.__init__(self, self, self.ROOT_TAG_NAME)
        self.hidden = 1
        SGMLParser.reset(self)
        self.currentData = []     # pending character data, see endData()
        self.currentTag = None
        self.tagStack = []        # stack of currently-open tags
        self.quoteStack = []
        self.pushTag(self)        # the root tag is always on the stack
    def popTag(self):
        """Pops the top tag off the stack and returns the new current
        tag."""
        tag = self.tagStack.pop()
        # Tags with just one string-owning child get the child as a
        # 'string' property, so that soup.tag.string is shorthand for
        # soup.tag.contents[0]
        if len(self.currentTag.contents) == 1 and \
           isinstance(self.currentTag.contents[0], NavigableString):
            self.currentTag.string = self.currentTag.contents[0]

        if self.tagStack:
            self.currentTag = self.tagStack[-1]
        return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
    # Flush the buffered character data into the tree as one
    # `containerClass` node (NavigableString by default).
    if self.currentData:
        currentData = u''.join(self.currentData)
        # Collapse an all-whitespace run to a single newline or space,
        # unless some open tag preserves whitespace (pre/textarea).
        if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
            not set([tag.name for tag in self.tagStack]).intersection(
                self.PRESERVE_WHITESPACE_TAGS)):
            if '\n' in currentData:
                currentData = '\n'
            else:
                currentData = ' '
        self.currentData = []
        # SoupStrainer support: at the top level, drop text the
        # strainer does not match.
        if self.parseOnlyThese and len(self.tagStack) <= 1 and \
               (not self.parseOnlyThese.text or \
                not self.parseOnlyThese.search(currentData)):
            return
        # Link the new node into both the tree and the `next` chain.
        o = containerClass(currentData)
        o.setup(self.currentTag, self.previous)
        if self.previous:
            self.previous.next = o
        self.previous = o
        self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
    """Pop the tag stack up to and including the most recent tag named
    `name`.  With inclusivePop=False, stop just short of that tag.
    Return the last tag actually popped (None if nothing was popped)."""
    if name == self.ROOT_TAG_NAME:
        # The document root is never popped.
        return
    # Scan downward from the top of the stack (never index 0, the root)
    # for the most recent occurrence of `name`.
    numPops = 0
    for i in range(len(self.tagStack) - 1, 0, -1):
        if self.tagStack[i].name == name:
            numPops = len(self.tagStack) - i
            break
    if not inclusivePop:
        numPops -= 1
    mostRecentTag = None
    for _ in range(numPops):
        mostRecentTag = self.popTag()
    return mostRecentTag
def _smartPop(self, name):
    """We need to pop up to the previous tag of this type, unless
    one of this tag's nesting reset triggers comes between this
    tag and the previous tag of this type, OR unless this tag is a
    generic nesting trigger and another generic nesting trigger
    comes between this tag and the previous tag of this type.

    Examples:
     <p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
     <p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
     <p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.

     <li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
     <tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
     <td><tr><td> *<td>* should pop to 'tr', not the first 'td'
    """
    # NESTABLE_TAGS maps a nestable tag name to the tags that reset its
    # nesting; a non-nestable tag is simply absent from the map.
    nestingResetTriggers = self.NESTABLE_TAGS.get(name)
    isNestable = nestingResetTriggers != None
    isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
    popTo = None
    inclusive = True
    # Walk the open-tag stack from the top (never index 0, the root).
    for i in range(len(self.tagStack)-1, 0, -1):
        p = self.tagStack[i]
        if (not p or p.name == name) and not isNestable:
            #Non-nestable tags get popped to the top or to their
            #last occurance.
            popTo = name
            break
        if (nestingResetTriggers != None
            and p.name in nestingResetTriggers) \
            or (nestingResetTriggers == None and isResetNesting
                and self.RESET_NESTING_TAGS.has_key(p.name)):
            #If we encounter one of the nesting reset triggers
            #peculiar to this tag, or we encounter another tag
            #that causes nesting to reset, pop up to but not
            #including that tag.
            popTo = p.name
            inclusive = False
            break
        # NOTE(review): this assignment is dead code -- `p` is rebound
        # from the stack at the top of the next iteration.
        p = p.parent
    if popTo:
        self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
    # Inside a quote tag (script/textarea) markup is literal text, so
    # re-serialize the tag and store it as character data.
    if self.quoteStack:
        #This is not a real tag.
        # NOTE(review): the tuple-parameter lambda below is Python 2-only
        # syntax; this module targets Python 2.
        attrs = ''.join(map(lambda(x, y): ' %s="%s"' % (x, y), attrs))
        self.handle_data('<%s%s>' % (name, attrs))
        return
    self.endData()

    if not self.isSelfClosingTag(name) and not selfClosing:
        # Implicitly close open tags according to the nesting rules.
        self._smartPop(name)

    # SoupStrainer support: at the top level, skip tags the strainer
    # rejects.
    if self.parseOnlyThese and len(self.tagStack) <= 1 \
           and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
        return

    tag = Tag(self, name, attrs, self.currentTag, self.previous)
    if self.previous:
        self.previous.next = tag
    self.previous = tag
    self.pushTag(tag)
    if selfClosing or self.isSelfClosingTag(name):
        # Self-closing tags are opened and closed in one go.
        self.popTag()
    if name in self.QUOTE_TAGS:
        # Enter literal mode until the matching end tag.
        self.quoteStack.append(name)
        self.literal = 1
    return tag
def unknown_endtag(self, name):
    # Inside a quote tag, a mismatched end tag is just literal text.
    if self.quoteStack and self.quoteStack[-1] != name:
        #This is not a real end tag.
        self.handle_data('</%s>' % name)
        return
    self.endData()
    self._popToTag(name)
    # Closing the quote tag itself leaves literal mode (unless quote
    # tags are nested).
    if self.quoteStack and self.quoteStack[-1] == name:
        self.quoteStack.pop()
        self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
    # Buffer raw character data; endData() flushes it into the tree.
    self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
    """Adds a certain piece of text to the tree as a NavigableString
    subclass."""
    # Flush any pending text first so `text` becomes a node of its own.
    self.endData()
    self.handle_data(text)
    self.endData(subclass)
def handle_pi(self, text):
    """Store a processing instruction as a ProcessingInstruction node.
    An XML declaration is replaced with a %SOUP-ENCODING% placeholder
    that gets filled in when the tree is rendered."""
    if text.startswith("xml"):
        text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
    self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
    "Handle comments as Comment objects."
    self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
    """Handle a numeric character reference: convert it to the real
    character when convertEntities is on, otherwise pass the reference
    through as text."""
    if self.convertEntities:
        self.handle_data(unichr(int(ref)))
    else:
        self.handle_data('&#%s;' % ref)
def handle_entityref(self, ref):
    """Handle entity references as data, possibly converting known
    HTML and/or XML entity references to the corresponding Unicode
    characters."""
    data = None
    # First try the full HTML entity table, then the five XML entities.
    if self.convertHTMLEntities:
        try:
            data = unichr(name2codepoint[ref])
        except KeyError:
            pass

    if not data and self.convertXMLEntities:
        data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)

    if not data and self.convertHTMLEntities and \
        not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
        # TODO: We've got a problem here. We're told this is
        # an entity reference, but it's not an XML entity
        # reference or an HTML entity reference. Nonetheless,
        # the logical thing to do is to pass it through as an
        # unrecognized entity reference.
        #
        # Except: when the input is "&carol;" this function
        # will be called with input "carol". When the input is
        # "AT&T", this function will be called with input
        # "T". We have no way of knowing whether a semicolon
        # was present originally, so we don't know whether
        # this is an unknown entity or just a misplaced
        # ampersand.
        #
        # The more common case is a misplaced ampersand, so I
        # escape the ampersand and omit the trailing semicolon.
        data = "&%s" % ref
    if not data:
        # This case is different from the one above, because we
        # haven't already gone through a supposedly comprehensive
        # mapping of entities to Unicode characters. We might not
        # have gone through any mapping at all. So the chances are
        # very high that this is a real entity, and not a
        # misplaced ampersand.
        data = "&%s;" % ref
    self.handle_data(data)
def handle_decl(self, data):
    "Handle DOCTYPEs and the like as Declaration objects."
    self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
    """Treat a bogus SGML declaration as raw data. Treat a CDATA
    declaration as a CData object."""
    j = None
    if self.rawdata[i:i+9] == '<![CDATA[':
        # Capture everything up to ']]>' (or end of input) as CData.
        k = self.rawdata.find(']]>', i)
        if k == -1:
            k = len(self.rawdata)
        data = self.rawdata[i+9:k]
        j = k+3
        self._toStringSubclass(data, CData)
    else:
        try:
            j = SGMLParser.parse_declaration(self, i)
        except SGMLParseError:
            # Malformed declaration: keep the raw text as data.
            toHandle = self.rawdata[i:]
            self.handle_data(toHandle)
            j = i + len(toHandle)
    return j
class BeautifulSoup(BeautifulStoneSoup):
    """This parser knows the following facts about HTML:

    * Some tags have no closing tag and should be interpreted as being
      closed as soon as they are encountered.

    * The text inside some tags (ie. 'script') may contain tags which
      are not really part of the document and which should be parsed
      as text, not tags. If you want to parse the text as tags, you can
      always fetch it and parse it explicitly.

    * Tag nesting rules:

      Most tags can't be nested at all. For instance, the occurance of
      a <p> tag should implicitly close the previous <p> tag.

        <p>Para1<p>Para2
      should be transformed into:
        <p>Para1</p><p>Para2

      Some tags can be nested arbitrarily. For instance, the occurance
      of a <blockquote> tag should _not_ implicitly close the previous
      <blockquote> tag.

        Alice said: <blockquote>Bob said: <blockquote>Blah
      should NOT be transformed into:
        Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah

      Some tags can be nested, but the nesting is reset by the
      interposition of other tags. For instance, a <tr> tag should
      implicitly close the previous <tr> tag within the same <table>,
      but not close a <tr> tag in another table.

        <table><tr>Blah<tr>Blah
      should be transformed into:
        <table><tr>Blah</tr><tr>Blah
      but,
        <tr>Blah<table><tr>Blah
      should NOT be transformed into
        <tr>Blah<table></tr><tr>Blah

    Differing assumptions about tag nesting rules are a major source
    of problems with the BeautifulSoup class. If BeautifulSoup is not
    treating as nestable a tag your page author treats as nestable,
    try ICantBelieveItsBeautifulSoup, MinimalSoup, or
    BeautifulStoneSoup before writing your own subclass."""

    def __init__(self, *args, **kwargs):
        # Default smart-quote handling to HTML entities, and mark the
        # input as HTML so meta-tag charset sniffing is performed.
        if not kwargs.has_key('smartQuotesTo'):
            kwargs['smartQuotesTo'] = self.HTML_ENTITIES
        kwargs['isHTML'] = True
        BeautifulStoneSoup.__init__(self, *args, **kwargs)

    # Tags that never contain anything and close themselves.
    SELF_CLOSING_TAGS = buildTagMap(None,
                                    ['br' , 'hr', 'input', 'img', 'meta',
                                    'spacer', 'link', 'frame', 'base'])

    # Whitespace inside these tags must not be collapsed (see endData).
    PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

    # Contents of these tags are treated as literal text, not markup.
    QUOTE_TAGS = {'script' : None, 'textarea' : None}

    #According to the HTML standard, each of these inline tags can
    #contain another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_INLINE_TAGS = ['span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
                            'center']

    #According to the HTML standard, these block tags can contain
    #another tag of the same type. Furthermore, it's common
    #to actually use these tags this way.
    NESTABLE_BLOCK_TAGS = ['blockquote', 'div', 'fieldset', 'ins', 'del']

    #Lists can contain other lists, but there are restrictions.
    NESTABLE_LIST_TAGS = { 'ol' : [],
                           'ul' : [],
                           'li' : ['ul', 'ol'],
                           'dl' : [],
                           'dd' : ['dl'],
                           'dt' : ['dl'] }

    #Tables can contain other tables, but there are restrictions.
    NESTABLE_TABLE_TAGS = {'table' : [],
                           'tr' : ['table', 'tbody', 'tfoot', 'thead'],
                           'td' : ['tr'],
                           'th' : ['tr'],
                           'thead' : ['table'],
                           'tbody' : ['table'],
                           'tfoot' : ['table'],
                           }

    NON_NESTABLE_BLOCK_TAGS = ['address', 'form', 'p', 'pre']

    #If one of these tags is encountered, all tags up to the next tag of
    #this type are popped.
    RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
                                     NON_NESTABLE_BLOCK_TAGS,
                                     NESTABLE_LIST_TAGS,
                                     NESTABLE_TABLE_TAGS)

    NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
                                NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)

    # Used to detect the charset in a META tag; see start_meta
    CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)

    def start_meta(self, attrs):
        """Beautiful Soup can detect a charset included in a META tag,
        try to convert the document to that charset, and re-parse the
        document from the beginning."""
        httpEquiv = None
        contentType = None
        contentTypeIndex = None
        tagNeedsEncodingSubstitution = False

        # Pull out http-equiv and content attributes, remembering where
        # 'content' lives so it can be rewritten in place.
        for i in range(0, len(attrs)):
            key, value = attrs[i]
            key = key.lower()
            if key == 'http-equiv':
                httpEquiv = value
            elif key == 'content':
                contentType = value
                contentTypeIndex = i

        if httpEquiv and contentType: # It's an interesting meta tag.
            match = self.CHARSET_RE.search(contentType)
            if match:
                if (self.declaredHTMLEncoding is not None or
                    self.originalEncoding == self.fromEncoding):
                    # An HTML encoding was sniffed while converting
                    # the document to Unicode, or an HTML encoding was
                    # sniffed during a previous pass through the
                    # document, or an encoding was specified
                    # explicitly and it worked. Rewrite the meta tag.
                    def rewrite(match):
                        return match.group(1) + "%SOUP-ENCODING%"
                    newAttr = self.CHARSET_RE.sub(rewrite, contentType)
                    attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
                                               newAttr)
                    tagNeedsEncodingSubstitution = True
                else:
                    # This is our first pass through the document.
                    # Go through it again with the encoding information.
                    newCharset = match.group(3)
                    if newCharset and newCharset != self.originalEncoding:
                        self.declaredHTMLEncoding = newCharset
                        self._feed(self.declaredHTMLEncoding)
                        raise StopParsing
                pass
        tag = self.unknown_starttag("meta", attrs)
        if tag and tagNeedsEncodingSubstitution:
            tag.containsSubstitutions = True
class StopParsing(Exception):
    """Raised by BeautifulSoup.start_meta to abort the current parse so
    the document can be re-parsed with a newly discovered encoding."""
    pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
    """The BeautifulSoup class is oriented towards skipping over
    common HTML errors like unclosed tags. However, sometimes it makes
    errors of its own. For instance, consider this fragment:

     <b>Foo<b>Bar</b></b>

    This is perfectly valid (if bizarre) HTML. However, the
    BeautifulSoup class will implicitly close the first b tag when it
    encounters the second 'b'. It will think the author wrote
    "<b>Foo<b>Bar", and didn't close the first 'b' tag, because
    there's no real-world reason to bold something that's already
    bold. When it encounters '</b></b>' it will close two more 'b'
    tags, for a grand total of three tags closed instead of two. This
    can throw off the rest of your document structure. The same is
    true of a number of other tags, listed below.

    It's much more common for someone to forget to close a 'b' tag
    than to actually use nested 'b' tags, and the BeautifulSoup class
    handles the common case. This class handles the not-co-common
    case: where you can't believe someone wrote what they did, but
    it's valid HTML and BeautifulSoup screwed up by assuming it
    wouldn't be."""

    # Inline tags treated as nestable within themselves.  The original
    # list repeated 'strong' and 'big'; the duplicates were redundant
    # because buildTagMap keys each entry on the tag name.
    I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
     ['em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
      'cite', 'code', 'dfn', 'kbd', 'samp', 'var', 'b']

    I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ['noscript']

    # Extend BeautifulSoup's nestable-tag map with the lists above.
    NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
                                I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
class MinimalSoup(BeautifulSoup):
    """The MinimalSoup class is for parsing HTML that contains
    pathologically bad markup. It makes no assumptions about tag
    nesting, but it does know which tags are self-closing, that
    <script> tags contain Javascript and should not be parsed, that
    META tags may contain encoding information, and so on.

    This also makes it better for subclassing than BeautifulStoneSoup
    or BeautifulSoup."""

    # Only <noscript> resets nesting, and no tag is considered nestable.
    RESET_NESTING_TAGS = buildTagMap('noscript')
    NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
    """This class will push a tag with only a single string child into
    the tag's parent as an attribute. The attribute's name is the tag
    name, and the value is the string child. An example should give
    the flavor of the change:

    <foo><bar>baz</bar></foo>
     =>
    <foo bar="baz"><bar>baz</bar></foo>

    You can then access fooTag['bar'] instead of fooTag.barTag.string.

    This is, of course, useful for scraping structures that tend to
    use subelements instead of attributes, such as SOAP messages. Note
    that it modifies its input, so don't print the modified version
    out.

    I'm not sure how many people really want to use this class; let me
    know if you do. Mainly I like the name."""

    def popTag(self):
        # Before the regular pop, promote a tag whose only child is a
        # string to an attribute of its parent -- unless the parent
        # already has an attribute with that name.
        if len(self.tagStack) > 1:
            tag = self.tagStack[-1]
            parent = self.tagStack[-2]
            parent._getAttrMap()
            if (isinstance(tag, Tag) and len(tag.contents) == 1 and
                isinstance(tag.contents[0], NavigableString) and
                not parent.attrMap.has_key(tag.name)):
                parent[tag.name] = tag.contents[0]
        BeautifulStoneSoup.popTag(self)
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
    """Enterprise-friendly alias for BeautifulStoneSoup."""
    pass
class RobustHTMLParser(BeautifulSoup):
    """Enterprise-friendly alias for BeautifulSoup."""
    pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
    """Enterprise-friendly alias for ICantBelieveItsBeautifulSoup."""
    pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
    """Enterprise-friendly alias for MinimalSoup."""
    pass
class SimplifyingSOAPParser(BeautifulSOAP):
    """Enterprise-friendly alias for BeautifulSOAP."""
    pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
    """A class for detecting the encoding of a *ML document and
    converting it to a Unicode string. If the source encoding is
    windows-1252, can replace MS smart quotes with their HTML or XML
    equivalents."""

    # This dictionary maps commonly seen values for "charset" in HTML
    # meta tags to the corresponding Python codec names. It only covers
    # values that aren't in Python's aliases and can't be determined
    # by the heuristics in find_codec.
    CHARSET_ALIASES = { "macintosh" : "mac-roman",
                        "x-sjis" : "shift-jis" }

    def __init__(self, markup, overrideEncodings=[],
                 smartQuotesTo='xml', isHTML=False):
        # NOTE(review): the mutable [] default for overrideEncodings is
        # shared between calls; harmless here since it is never mutated.
        self.declaredHTMLEncoding = None
        self.markup, documentEncoding, sniffedEncoding = \
                     self._detectEncoding(markup, isHTML)
        self.smartQuotesTo = smartQuotesTo
        self.triedEncodings = []
        if markup == '' or isinstance(markup, unicode):
            # Already unicode (or trivially empty): nothing to convert.
            self.originalEncoding = None
            self.unicode = unicode(markup)
            return

        # Try encodings in decreasing order of confidence: caller
        # overrides, then declared/sniffed encodings, then chardet's
        # guess, then the common fallbacks utf-8 and windows-1252.
        u = None
        for proposedEncoding in overrideEncodings:
            u = self._convertFrom(proposedEncoding)
            if u: break
        if not u:
            for proposedEncoding in (documentEncoding, sniffedEncoding):
                u = self._convertFrom(proposedEncoding)
                if u: break

        # If no luck and we have auto-detection library, try that:
        if not u and chardet and not isinstance(self.markup, unicode):
            u = self._convertFrom(chardet.detect(self.markup)['encoding'])

        # As a last resort, try utf-8 and windows-1252:
        if not u:
            for proposed_encoding in ("utf-8", "windows-1252"):
                u = self._convertFrom(proposed_encoding)
                if u: break

        self.unicode = u
        if not u: self.originalEncoding = None

    def _subMSChar(self, orig):
        """Changes a MS smart quote character to an XML or HTML
        entity."""
        sub = self.MS_CHARS.get(orig)
        if type(sub) == types.TupleType:
            # (entity-name, hex-codepoint) pair: choose the form
            # according to smartQuotesTo.
            if self.smartQuotesTo == 'xml':
                sub = '&#x%s;' % sub[1]
            else:
                sub = '&%s;' % sub[0]
        return sub

    def _convertFrom(self, proposed):
        # Try a single candidate encoding; return the converted markup
        # on success, None on failure. Each codec is tried at most once.
        proposed = self.find_codec(proposed)
        if not proposed or proposed in self.triedEncodings:
            return None
        self.triedEncodings.append(proposed)
        markup = self.markup

        # Convert smart quotes to HTML if coming from an encoding
        # that might have them.
        if self.smartQuotesTo and proposed.lower() in("windows-1252",
                                                      "iso-8859-1",
                                                      "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub \
                     (lambda(x): self._subMSChar(x.group(1)),
                      markup)

        try:
            # print "Trying to convert document to %s" % proposed
            u = self._toUnicode(markup, proposed)
            self.markup = u
            self.originalEncoding = proposed
        except Exception, e:
            # print "That didn't work!"
            # print e
            return None
        #print "Correct encoding: %s" % proposed
        return self.markup

    def _toUnicode(self, data, encoding):
        '''Given a string and its encoding, decodes the string into Unicode.
        %encoding is a string recognized by encodings.aliases'''

        # strip Byte Order Mark (if present); a BOM also overrides the
        # proposed encoding with the one it implies.
        if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
               and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16be'
            data = data[2:]
        elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
                 and (data[2:4] != '\x00\x00'):
            encoding = 'utf-16le'
            data = data[2:]
        elif data[:3] == '\xef\xbb\xbf':
            encoding = 'utf-8'
            data = data[3:]
        elif data[:4] == '\x00\x00\xfe\xff':
            encoding = 'utf-32be'
            data = data[4:]
        elif data[:4] == '\xff\xfe\x00\x00':
            encoding = 'utf-32le'
            data = data[4:]
        newdata = unicode(data, encoding)
        return newdata

    def _detectEncoding(self, xml_data, isHTML=False):
        """Given a document, tries to detect its XML encoding."""
        xml_encoding = sniffed_xml_encoding = None
        try:
            # Sniff the byte-order signature and normalize to UTF-8.
            if xml_data[:4] == '\x4c\x6f\xa7\x94':
                # EBCDIC
                xml_data = self._ebcdic_to_ascii(xml_data)
            elif xml_data[:4] == '\x00\x3c\x00\x3f':
                # UTF-16BE
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
                     and (xml_data[2:4] != '\x00\x00'):
                # UTF-16BE with BOM
                sniffed_xml_encoding = 'utf-16be'
                xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x3f\x00':
                # UTF-16LE
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
            elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
                     (xml_data[2:4] != '\x00\x00'):
                # UTF-16LE with BOM
                sniffed_xml_encoding = 'utf-16le'
                xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\x00\x3c':
                # UTF-32BE
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\x3c\x00\x00\x00':
                # UTF-32LE
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
            elif xml_data[:4] == '\x00\x00\xfe\xff':
                # UTF-32BE with BOM
                sniffed_xml_encoding = 'utf-32be'
                xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
            elif xml_data[:4] == '\xff\xfe\x00\x00':
                # UTF-32LE with BOM
                sniffed_xml_encoding = 'utf-32le'
                xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
            elif xml_data[:3] == '\xef\xbb\xbf':
                # UTF-8 with BOM
                sniffed_xml_encoding = 'utf-8'
                xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
            else:
                sniffed_xml_encoding = 'ascii'
                pass
        except:
            # NOTE(review): bare except deliberately swallows decode
            # errors during sniffing; detection falls through below.
            xml_encoding_match = None
        # Look for an explicit declaration: XML prolog first, then (for
        # HTML) a meta charset attribute.
        xml_encoding_match = re.compile(
            '^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
        if not xml_encoding_match and isHTML:
            regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
            xml_encoding_match = regexp.search(xml_data)
        if xml_encoding_match is not None:
            xml_encoding = xml_encoding_match.groups()[0].lower()
            if isHTML:
                self.declaredHTMLEncoding = xml_encoding
        if sniffed_xml_encoding and \
           (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
                             'iso-10646-ucs-4', 'ucs-4', 'csucs4',
                             'utf-16', 'utf-32', 'utf_16', 'utf_32',
                             'utf16', 'u16')):
            # The sniffed byte pattern is more specific than the vague
            # declared encoding; trust the sniffer.
            xml_encoding = sniffed_xml_encoding
        return xml_data, xml_encoding, sniffed_xml_encoding

    def find_codec(self, charset):
        # Resolve a charset name to a known Python codec name, trying
        # the alias table and hyphen variations; fall back to the input.
        return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
               or (charset and self._codec(charset.replace("-", ""))) \
               or (charset and self._codec(charset.replace("-", "_"))) \
               or charset

    def _codec(self, charset):
        # Return `charset` itself if Python has a codec for it, else None.
        if not charset: return charset
        codec = None
        try:
            codecs.lookup(charset)
            codec = charset
        except (LookupError, ValueError):
            pass
        return codec

    EBCDIC_TO_ASCII_MAP = None
    def _ebcdic_to_ascii(self, s):
        # Translate EBCDIC bytes to ASCII, building the 256-byte
        # translation table lazily and caching it on the class.
        c = self.__class__
        if not c.EBCDIC_TO_ASCII_MAP:
            emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
                    16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
                    128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
                    144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
                    32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
                    38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
                    45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
                    186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
                    195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
                    201,202,106,107,108,109,110,111,112,113,114,203,204,205,
                    206,207,208,209,126,115,116,117,118,119,120,121,122,210,
                    211,212,213,214,215,216,217,218,219,220,221,222,223,224,
                    225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
                    73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
                    82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
                    90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
                    250,251,252,253,254,255)
            import string
            c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
            ''.join(map(chr, range(256))), ''.join(map(chr, emap)))
        return s.translate(c.EBCDIC_TO_ASCII_MAP)

    # Windows-1252 "smart punctuation" bytes mapped to
    # (entity-name, hex-codepoint) pairs, or a plain replacement string
    # for bytes with no entity equivalent.
    MS_CHARS = { '\x80' : ('euro', '20AC'),
                 '\x81' : ' ',
                 '\x82' : ('sbquo', '201A'),
                 '\x83' : ('fnof', '192'),
                 '\x84' : ('bdquo', '201E'),
                 '\x85' : ('hellip', '2026'),
                 '\x86' : ('dagger', '2020'),
                 '\x87' : ('Dagger', '2021'),
                 '\x88' : ('circ', '2C6'),
                 '\x89' : ('permil', '2030'),
                 '\x8A' : ('Scaron', '160'),
                 '\x8B' : ('lsaquo', '2039'),
                 '\x8C' : ('OElig', '152'),
                 '\x8D' : '?',
                 '\x8E' : ('#x17D', '17D'),
                 '\x8F' : '?',
                 '\x90' : '?',
                 '\x91' : ('lsquo', '2018'),
                 '\x92' : ('rsquo', '2019'),
                 '\x93' : ('ldquo', '201C'),
                 '\x94' : ('rdquo', '201D'),
                 '\x95' : ('bull', '2022'),
                 '\x96' : ('ndash', '2013'),
                 '\x97' : ('mdash', '2014'),
                 '\x98' : ('tilde', '2DC'),
                 '\x99' : ('trade', '2122'),
                 '\x9a' : ('scaron', '161'),
                 '\x9b' : ('rsaquo', '203A'),
                 '\x9c' : ('oelig', '153'),
                 '\x9d' : '?',
                 '\x9e' : ('#x17E', '17E'),
                 '\x9f' : ('Yuml', ''),}
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
| Python |
"""
parser.http.bsouplxml.etree module (imdb.parser.http package).
This module adapts the beautifulsoup interface to lxml.etree module.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import _bsoup as BeautifulSoup
from _bsoup import Tag as Element
import bsoupxpath
# Not directly used by IMDbPY, but do not remove: it's used by IMDbPYKit,
# for example.
def fromstring(xml_string):
    """Return a DOM representation of the string."""
    # convertEntities is deliberately None here rather than
    # BeautifulSoup.BeautifulStoneSoup.XML_ENTITIES.
    soup = BeautifulSoup.BeautifulStoneSoup(xml_string, convertEntities=None)
    return soup.findChild(True)
def tostring(element, encoding=None, pretty_print=False):
    """Return a string or unicode representation of an element."""
    if encoding is unicode:
        # Passing the `unicode` type itself means "give me unicode";
        # Tag.__str__ expects None for that.
        encoding = None
    # For BeautifulSoup 3.1
    #encArgs = {'prettyPrint': pretty_print}
    #if encoding is not None:
    #    encArgs['encoding'] = encoding
    #return element.encode(**encArgs)
    return element.__str__(encoding, pretty_print)
def setattribute(tag, name, value):
    """Set an attribute on a tag (assigned below as Tag.set, mirroring
    the lxml.etree element API)."""
    tag[name] = value
def xpath(node, expr):
    """Apply an xpath expression to a node. Return a list of nodes."""
    #path = bsoupxpath.Path(expr)
    # Use the module-level factory instead of constructing a Path directly.
    path = bsoupxpath.get_path(expr)
    return path.apply(node)
# XXX: monkey patching the beautifulsoup tag class
class _EverythingIsNestable(dict):
""""Fake that every tag is nestable."""
def get(self, key, *args, **kwds):
return []
# Graft lxml.etree-style accessors onto the BeautifulSoup Tag class so
# the rest of the package can treat soup nodes like etree elements.
BeautifulSoup.BeautifulStoneSoup.NESTABLE_TAGS = _EverythingIsNestable()
BeautifulSoup.Tag.tag = property(fget=lambda self: self.name)
# A Tag already maps attribute names to values, so it can act as attrib.
BeautifulSoup.Tag.attrib = property(fget=lambda self: self)
BeautifulSoup.Tag.text = property(fget=lambda self: self.string)
BeautifulSoup.Tag.set = setattribute
BeautifulSoup.Tag.getparent = lambda self: self.parent
BeautifulSoup.Tag.drop_tree = BeautifulSoup.Tag.extract
BeautifulSoup.Tag.xpath = xpath

# TODO: setting the text attribute for tags
| Python |
"""
parser.http.bsoupxpath module (imdb.parser.http package).
This module provides XPath support for BeautifulSoup.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__author__ = 'H. Turgut Uyar <uyar@tekir.org>'
__docformat__ = 'restructuredtext'
import re
import string
import _bsoup as BeautifulSoup
# XPath related enumerations and constants

AXIS_ANCESTOR = 'ancestor'
AXIS_ATTRIBUTE = 'attribute'
AXIS_CHILD = 'child'
AXIS_DESCENDANT = 'descendant'
AXIS_FOLLOWING = 'following'
AXIS_FOLLOWING_SIBLING = 'following-sibling'
AXIS_PRECEDING_SIBLING = 'preceding-sibling'

# The subset of XPath 1.0 axes this module implements.
AXES = (AXIS_ANCESTOR, AXIS_ATTRIBUTE, AXIS_CHILD, AXIS_DESCENDANT,
        AXIS_FOLLOWING, AXIS_FOLLOWING_SIBLING, AXIS_PRECEDING_SIBLING)

# XPath functions recognized inside predicates.
XPATH_FUNCTIONS = ('starts-with', 'string-length')
def tokenize_path(path):
    """Tokenize a location path into location steps. Return the list of steps.

    If two steps are separated by a double slash, the double slashes are part
    of the second step. If they are separated by only one slash, the slash is
    not included in any of the steps.
    """
    # form a list of tuples that mark the start and end positions of steps
    separators = []
    last_position = 0
    i = -1
    in_string = False
    while i < len(path) - 1:
        i = i + 1
        if path[i] == "'":
            in_string = not in_string
        if in_string:
            # slashes within strings are not step separators
            continue
        if path[i] == '/':
            if i > 0:
                separators.append((last_position, i))
            # BUGFIX: guard the lookahead so a path ending in '/' no
            # longer raises IndexError (it now yields a trailing empty
            # step instead).
            if (i + 1 < len(path)) and (path[i+1] == '/'):
                # a double slash belongs to the following step
                last_position = i
                i = i + 1
            else:
                last_position = i + 1
    separators.append((last_position, len(path)))

    steps = []
    for start, end in separators:
        steps.append(path[start:end])
    return steps
class Path:
    """A location path.
    """

    def __init__(self, path, parse=True):
        # path: the XPath expression; parse=False leaves self.steps
        # empty (steps must then be supplied some other way).
        self.path = path
        self.steps = []
        if parse:
            if (path[0] == '/') and (path[1] != '/'):
                # if not on the descendant axis, remove the leading slash
                # NOTE(review): assumes len(path) >= 2; a bare "/" or an
                # empty path raises IndexError -- confirm callers.
                path = path[1:]
            steps = tokenize_path(path)
            for step in steps:
                self.steps.append(PathStep(step))

    def apply(self, node):
        """Apply the path to a node. Return the resulting list of nodes.

        Apply the steps in the path sequentially by sending the output of each
        step as input to the next step.
        """
        # FIXME: this should return a node SET, not a node LIST
        # or at least a list with no duplicates
        if self.path[0] == '/':
            # for an absolute path, start from the root
            if not isinstance(node, BeautifulSoup.Tag) \
                   or (node.name != '[document]'):
                node = node.findParent('[document]')
        nodes = [node]
        for step in self.steps:
            nodes = step.apply(nodes)
        return nodes
class PathStep:
"""A location step in a location path.
"""
# Regexes that carve one location step into axis, node test and a
# (possibly empty) sequence of predicates.
AXIS_PATTERN = r"""(%s)::|@""" % '|'.join(AXES)
NODE_TEST_PATTERN = r"""\w+(\(\))?"""
PREDICATE_PATTERN = r"""\[(.*?)\]"""
LOCATION_STEP_PATTERN = r"""(%s)?(%s)((%s)*)""" \
                        % (AXIS_PATTERN, NODE_TEST_PATTERN, PREDICATE_PATTERN)
_re_location_step = re.compile(LOCATION_STEP_PATTERN)

# Patterns for the predicate forms understood by __parse_predicate.
PREDICATE_NOT_PATTERN = r"""not\((.*?)\)"""
PREDICATE_AXIS_PATTERN = r"""(%s)?(%s)(='(.*?)')?""" \
                         % (AXIS_PATTERN, NODE_TEST_PATTERN)
PREDICATE_FUNCTION_PATTERN = r"""(%s)\(([^,]+(,\s*[^,]+)*)?\)(=(.*))?""" \
                             % '|'.join(XPATH_FUNCTIONS)
_re_predicate_not = re.compile(PREDICATE_NOT_PATTERN)
_re_predicate_axis = re.compile(PREDICATE_AXIS_PATTERN)
_re_predicate_function = re.compile(PREDICATE_FUNCTION_PATTERN)
def __init__(self, step):
    # step: one location step as produced by tokenize_path; it may keep
    # a leading '//' marking the descendant axis.
    self.step = step
    if (step == '.') or (step == '..'):
        # self/parent steps are handled specially in apply()
        return

    if step[:2] == '//':
        default_axis = AXIS_DESCENDANT
        step = step[2:]
    else:
        default_axis = AXIS_CHILD

    step_match = self._re_location_step.match(step)

    # determine the axis
    axis = step_match.group(1)   # full "axis::" prefix (or '@'), if any
    if axis is None:
        self.axis = default_axis
    elif axis == '@':
        self.axis = AXIS_ATTRIBUTE
    else:
        self.axis = step_match.group(2)   # bare axis name without '::'

    self.soup_args = {}   # keyword args ultimately fed to findAll()
    self.index = None     # positional selection ([n] / last()), if any
    self.node_test = step_match.group(3)
    if self.node_test == 'text()':
        self.soup_args['text'] = True
    else:
        self.soup_args['name'] = self.node_test

    # predicate callables that could not be mapped onto findAll args
    self.checkers = []
    predicates = step_match.group(5)
    if predicates is not None:
        # strip the outer brackets, split on "][", drop empty entries
        predicates = [p for p in predicates[1:-1].split('][') if p]
        for predicate in predicates:
            checker = self.__parse_predicate(predicate)
            if checker is not None:
                self.checkers.append(checker)
def __parse_predicate(self, predicate):
"""Parse the predicate. Return a callable that can be used to filter
nodes. Update `self.soup_args` to take advantage of BeautifulSoup search
features.
"""
try:
position = int(predicate)
if self.axis == AXIS_DESCENDANT:
return PredicateFilter('position', value=position)
else:
# use the search limit feature instead of a checker
self.soup_args['limit'] = position
self.index = position - 1
return None
except ValueError:
pass
if predicate == "last()":
self.index = -1
return None
negate = self._re_predicate_not.match(predicate)
if negate:
predicate = negate.group(1)
function_match = self._re_predicate_function.match(predicate)
if function_match:
name = function_match.group(1)
arguments = function_match.group(2)
value = function_match.group(4)
if value is not None:
value = function_match.group(5)
return PredicateFilter(name, arguments, value)
axis_match = self._re_predicate_axis.match(predicate)
if axis_match:
axis = axis_match.group(1)
if axis is None:
axis = AXIS_CHILD
elif axis == '@':
axis = AXIS_ATTRIBUTE
if axis == AXIS_ATTRIBUTE:
# use the attribute search feature instead of a checker
attribute_name = axis_match.group(3)
if axis_match.group(5) is not None:
attribute_value = axis_match.group(6)
elif not negate:
attribute_value = True
else:
attribute_value = None
if not self.soup_args.has_key('attrs'):
self.soup_args['attrs'] = {}
self.soup_args['attrs'][attribute_name] = attribute_value
return None
elif axis == AXIS_CHILD:
node_test = axis_match.group(3)
node_value = axis_match.group(6)
return PredicateFilter('axis', node_test, value=node_value,
negate=negate)
raise NotImplementedError("This predicate is not implemented")
def apply(self, nodes):
"""Apply the step to a list of nodes. Return the list of nodes for the
next step.
"""
if self.step == '.':
return nodes
elif self.step == '..':
return [node.parent for node in nodes]
result = []
for node in nodes:
if self.axis == AXIS_CHILD:
found = node.findAll(recursive=False, **self.soup_args)
elif self.axis == AXIS_DESCENDANT:
found = node.findAll(recursive=True, **self.soup_args)
elif self.axis == AXIS_ATTRIBUTE:
try:
found = [node[self.node_test]]
except KeyError:
found = []
elif self.axis == AXIS_FOLLOWING_SIBLING:
found = node.findNextSiblings(**self.soup_args)
elif self.axis == AXIS_PRECEDING_SIBLING:
# TODO: make sure that the result is reverse ordered
found = node.findPreviousSiblings(**self.soup_args)
elif self.axis == AXIS_FOLLOWING:
# find the last descendant of this node
last = node
while (not isinstance(last, BeautifulSoup.NavigableString)) \
and (len(last.contents) > 0):
last = last.contents[-1]
found = last.findAllNext(**self.soup_args)
elif self.axis == AXIS_ANCESTOR:
found = node.findParents(**self.soup_args)
# this should only be active if there is a position predicate
# and the axis is not 'descendant'
if self.index is not None:
if found:
if len(found) > self.index:
found = [found[self.index]]
else:
found = []
if found:
for checker in self.checkers:
found = filter(checker, found)
result.extend(found)
return result
class PredicateFilter:
    """A callable class for filtering nodes.

    An instance is used as a predicate: calling it with a node returns
    True when the node satisfies the predicate (the result is inverted
    when `negate` is true).

    Supported predicate names: 'position', 'axis', 'starts-with' and
    'string-length'; any other name raises NotImplementedError.
    """
    def __init__(self, name, arguments=None, value=None, negate=False):
        self.name = name
        self.arguments = arguments
        self.negate = negate
        if name == 'position':
            self.__filter = self.__position
            self.value = value
        elif name == 'axis':
            self.__filter = self.__axis
            self.node_test = arguments
            self.value = value
        elif name == 'starts-with':
            self.__filter = self.__starts_with
            # `arguments` is "operand, 'prefix'"; normalize it into a
            # (is_attribute, operand_name, prefix) tuple.
            # NOTE: str.strip is used instead of the old string.strip,
            # which no longer exists in Python 3.
            args = [arg.strip() for arg in arguments.split(',')]
            if args[0][0] == '@':
                self.arguments = (True, args[0][1:], args[1][1:-1])
            else:
                self.arguments = (False, args[0], args[1][1:-1])
        elif name == 'string-length':
            self.__filter = self.__string_length
            # `arguments` is the operand: an attribute ('@name') or a
            # node test; normalized to (is_attribute, operand_name).
            args = [arg.strip() for arg in arguments.split(',')]
            if args[0][0] == '@':
                self.arguments = (True, args[0][1:])
            else:
                self.arguments = (False, args[0])
            self.value = int(value)
        else:
            raise NotImplementedError("This XPath function is not implemented")

    def __call__(self, node):
        if self.negate:
            return not self.__filter(node)
        else:
            return self.__filter(node)

    def __position(self, node):
        """True if the node is at self.value (1-based) among its siblings."""
        if isinstance(node, BeautifulSoup.NavigableString):
            actual_position = len(node.findPreviousSiblings(text=True)) + 1
        else:
            actual_position = len(node.findPreviousSiblings(node.name)) + 1
        return actual_position == self.value

    def __axis(self, node):
        """True if the node's text, or one of its matching children,
        equals self.value (or if any matching child exists when no
        value was given)."""
        if self.node_test == 'text()':
            return node.string == self.value
        else:
            children = node.findAll(self.node_test, recursive=False)
            if len(children) > 0 and self.value is None:
                return True
            for child in children:
                if child.string == self.value:
                    return True
            return False

    def __starts_with(self, node):
        """True if the operand (attribute value or leading text node)
        starts with the configured prefix."""
        if self.arguments[0]:
            # this is an attribute
            attribute_name = self.arguments[1]
            if node.has_key(attribute_name):
                first = node[attribute_name]
                return first.startswith(self.arguments[2])
        elif self.arguments[1] == 'text()':
            first = node.contents[0]
            if isinstance(first, BeautifulSoup.NavigableString):
                return first.startswith(self.arguments[2])
        return False

    def __string_length(self, node):
        """True if the operand's string length equals self.value."""
        # FIX: `value` must be initialized, otherwise an operand that is
        # neither an attribute nor 'text()' raised NameError below.
        value = None
        if self.arguments[0]:
            # this is an attribute
            attribute_name = self.arguments[1]
            if node.has_key(attribute_name):
                value = node[attribute_name]
        elif self.arguments[1] == 'text()':
            value = node.string
        if value is not None:
            return len(value) == self.value
        return False
# Module-level caches of parsed Path and PathStep objects, keyed by
# their source strings.
_paths = {}
_steps = {}

def get_path(path):
    """Utility for eliminating repeated parsings of the same paths and steps.

    Parsed Path objects (and their individual PathStep objects) are
    memoized in the module-level _paths and _steps caches, so each
    distinct path/step string is parsed only once.
    """
    # `in` instead of the deprecated, Python-2-only dict.has_key().
    if path not in _paths:
        p = Path(path, parse=False)
        steps = tokenize_path(path)
        for step in steps:
            if step not in _steps:
                _steps[step] = PathStep(step)
            p.steps.append(_steps[step])
        _paths[path] = p
    return _paths[path]
| Python |
"""
parser.http.bsouplxml.html module (imdb.parser.http package).
This module adapts the BeautifulSoup interface to the lxml.html module.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import _bsoup as BeautifulSoup
def fromstring(html_string):
    """Parse `html_string` and return the root node of the resulting DOM."""
    soup_class = BeautifulSoup.BeautifulSoup
    soup = soup_class(html_string,
                      convertEntities=soup_class.HTML_ENTITIES)
    # Return the first real tag, skipping the '[document]' wrapper.
    return soup.findChild(True)
| Python |
"""
parser.http package (imdb package).
This package provides the IMDbHTTPAccessSystem class used to access
IMDb's data through the web interface.
The imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "http" or "web"
or "html" (this is the default).
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys
import logging
from urllib import FancyURLopener, quote_plus
from codecs import lookup
from imdb import IMDbBase, imdbURL_movie_main, imdbURL_person_main, \
imdbURL_character_main, imdbURL_company_main, \
imdbURL_keyword_main, imdbURL_find, imdbURL_top250, \
imdbURL_bottom100
from imdb.utils import analyze_title
from imdb._exceptions import IMDbDataAccessError, IMDbParserError
import searchMovieParser
import searchPersonParser
import searchCharacterParser
import searchCompanyParser
import searchKeywordParser
import movieParser
import personParser
import characterParser
import companyParser
import topBottomParser
# Logger for miscellaneous functions.
_aux_logger = logging.getLogger('imdbpy.parser.http.aux')

# True when running inside the Google App Engine environment; detected
# by probing for the google.appengine package (some features, like the
# 'size' argument of file.read, are handled differently there).
IN_GAE = False
try:
    import google.appengine
    IN_GAE = True
    _aux_logger.info('IMDbPY is running in the Google App Engine environment')
except ImportError:
    pass
class _ModuleProxy:
    """A proxy to instantiate and access parsers."""
    def __init__(self, module, defaultKeys=None, oldParsers=False,
                 useModule=None, fallBackToNew=False):
        """Initialize a proxy for the given module; defaultKeys, if set,
        must be a dictionary of values to set for instanced objects."""
        # The old (non-DOM) parsers were removed; these two arguments are
        # kept only for backward compatibility.
        if oldParsers or fallBackToNew:
            _aux_logger.warn('The old set of parsers was removed; falling ' \
                    'back to the new parsers.')
        self.useModule = useModule
        if defaultKeys is None:
            defaultKeys = {}
        self._defaultKeys = defaultKeys
        self._module = module

    def __getattr__(self, name):
        """Called only when no look-up is found."""
        _sm = self._module
        # Read the _OBJECTS dictionary to build the asked parser.
        if name in _sm._OBJECTS:
            _entry = _sm._OBJECTS[name]
            # Initialize the parser.
            kwds = {}
            if self.useModule:
                kwds = {'useModule': self.useModule}
            parserClass = _entry[0][0]
            obj = parserClass(**kwds)
            attrsToSet = self._defaultKeys.copy()
            attrsToSet.update(_entry[1] or {})
            # Set attribute to the object.
            for key in attrsToSet:
                setattr(obj, key, attrsToSet[key])
            # Cache the instance on self, so that __getattr__ is not
            # called again for the same name.
            setattr(self, name, obj)
            return obj
        # Fall back to the wrapped module's own attributes.
        return getattr(_sm, name)
# Python version, as a (major, minor) tuple.
PY_VERSION = sys.version_info[:2]

# The cookies for the "adult" search.
# Please don't mess with these accounts.
# Old 'IMDbPY' account.
_old_cookie_id = 'boM2bYxz9MCsOnH9gZ0S9QHs12NWrNdApxsls1Vb5/NGrNdjcHx3dUas10UASoAjVEvhAbGagERgOpNkAPvxdbfKwaV2ikEj9SzXY1WPxABmDKQwdqzwRbM+12NSeJFGUEx3F8as10WwidLzVshDtxaPIbP13NdjVS9UZTYqgTVGrNcT9vyXU1'
_old_cookie_uu = '3M3AXsquTU5Gur/Svik+ewflPm5Rk2ieY3BIPlLjyK3C0Dp9F8UoPgbTyKiGtZp4x1X+uAUGKD7BM2g+dVd8eqEzDErCoYvdcvGLvVLAen1y08hNQtALjVKAe+1hM8g9QbNonlG1/t4S82ieUsBbrSIQbq1yhV6tZ6ArvSbA7rgHc8n5AdReyAmDaJ5Wm/ee3VDoCnGj/LlBs2ieUZNorhHDKK5Q=='
# New 'IMDbPYweb' account.
_cookie_id = 'rH1jNAkjTlNXvHolvBVBsgaPICNZbNdjVjzFwzas9JRmusdjVoqBs/Hs12NR+1WFxEoR9bGKEDUg6sNlADqXwkas12N131Rwdb+UQNGKN8PWrNdjcdqBQVLq8mbGDHP3hqzxhbD692NQi9D0JjpBtRaPIbP1zNdjUOqENQYv1ADWrNcT9vyXU1'
_cookie_uu = 'su4/m8cho4c6HP+W1qgq6wchOmhnF0w+lIWvHjRUPJ6nRA9sccEafjGADJ6hQGrMd4GKqLcz2X4z5+w+M4OIKnRn7FpENH7dxDQu3bQEHyx0ZEyeRFTPHfQEX03XF+yeN1dsPpcXaqjUZAw+lGRfXRQEfz3RIX9IgVEffdBAHw2wQXyf9xdMPrQELw0QNB8dsffsqcdQemjPB0w+moLcPh0JrKrHJ9hjBzdMPpcXTH7XRwwOk='
# imdbpy2010 account (kept here, disabled, as a spare).
#_cookie_id = 'QrCdxVi+L+WgqOLrQJJgBgRRXGInphxiBPU/YXSFDyExMFzCp6YcYgSVXyEUhS/xMID8wqemHGID4DlntwZ49vemP5UXsAxiJ4D6goSmHGIgNT9hMXBaRSF2vMS3phxB0bVfQiQlP1RxdrzhB6YcRHFASyIhQVowwXCKtDSlD2YhgRvxBsCKtGemHBKH9mxSI='
#_cookie_uu = 'oiEo2yoJFCA2Zbn/o7Z1LAPIwotAu6QdALv3foDb1x5F/tdrFY63XkSfty4kntS8Y8jkHSDLt3406+d+JThEilPI0mtTaOQdA/t2/iErp22jaLdeVU5ya4PIREpj7HFdpzhEHadcIAngSER50IoHDpD6Bz4Qy3b+UIhE/hBbhz5Q63ceA2hEvhPo5B0FnrL9Q8jkWjDIbA0Au3d+AOtnXoCIRL4Q28c+UOtnXpP4RL4T6OQdA+6ijUCI5B0AW2d+UOtnXpPYRL4T6OQdA8jkTUOYlC0A=='
class _FakeURLOpener(object):
"""Fake URLOpener object, used to return empty strings instead of
errors.
"""
def __init__(self, url, headers):
self.url = url
self.headers = headers
def read(self, *args, **kwds): return ''
def close(self, *args, **kwds): pass
def info(self, *args, **kwds): return self.headers
class IMDbURLopener(FancyURLopener):
    """Fetch web pages and handle errors."""
    _logger = logging.getLogger('imdbpy.parser.http.urlopener')

    def __init__(self, *args, **kwargs):
        # URL of the last page actually retrieved (after any redirect).
        self._last_url = u''
        FancyURLopener.__init__(self, *args, **kwargs)
        # Headers to add to every request.
        # XXX: IMDb's web server doesn't like urllib-based programs,
        #      so lets fake to be Mozilla.
        #      Wow! I'm shocked by my total lack of ethic! <g>
        for header in ('User-Agent', 'User-agent', 'user-agent'):
            self.del_header(header)
        self.set_header('User-Agent', 'Mozilla/5.0')
        # XXX: This class is used also to perform "Exact Primary
        #      [Title|Name]" searches, and so by default the cookie is set.
        c_header = 'id=%s; uu=%s' % (_cookie_id, _cookie_uu)
        self.set_header('Cookie', c_header)

    def get_proxy(self):
        """Return the used proxy, or an empty string."""
        return self.proxies.get('http', '')

    def set_proxy(self, proxy):
        """Set the proxy; an empty value removes any configured proxy."""
        if not proxy:
            if self.proxies.has_key('http'):
                del self.proxies['http']
        else:
            # Ensure the proxy URL carries the http:// scheme.
            if not proxy.lower().startswith('http://'):
                proxy = 'http://%s' % proxy
            self.proxies['http'] = proxy

    def set_header(self, header, value, _overwrite=True):
        """Set a default header."""
        if _overwrite:
            self.del_header(header)
        self.addheaders.append((header, value))

    def del_header(self, header):
        """Remove a default header."""
        # Only the first matching header is removed.
        for index in xrange(len(self.addheaders)):
            if self.addheaders[index][0] == header:
                del self.addheaders[index]
                break

    def retrieve_unicode(self, url, size=-1):
        """Retrieves the given URL, and returns a unicode string,
        trying to guess the encoding of the data (assuming latin_1
        by default)"""
        encode = None
        try:
            if size != -1:
                # Ask the server for at most `size` bytes.
                self.set_header('Range', 'bytes=0-%d' % size)
            uopener = self.open(url)
            kwds = {}
            # file.read(size) is not usable on very old Pythons nor in
            # the Google App Engine environment.
            if PY_VERSION > (2, 3) and not IN_GAE:
                kwds['size'] = size
            content = uopener.read(**kwds)
            self._last_url = uopener.url
            # Maybe the server is so nice to tell us the charset...
            server_encode = uopener.info().getparam('charset')
            # Otherwise, look at the content-type HTML meta tag.
            if server_encode is None and content:
                first_bytes = content[:512]
                begin_h = first_bytes.find('text/html; charset=')
                if begin_h != -1:
                    # 19 == len('text/html; charset=')
                    end_h = first_bytes[19+begin_h:].find('"')
                    if end_h != -1:
                        server_encode = first_bytes[19+begin_h:19+begin_h+end_h]
            if server_encode:
                try:
                    # Validate the advertised charset before trusting it.
                    if lookup(server_encode):
                        encode = server_encode
                except (LookupError, ValueError, TypeError):
                    pass
            uopener.close()
            if size != -1:
                self.del_header('Range')
            self.close()
        except IOError, e:
            if size != -1:
                # Ensure that the Range header is removed.
                self.del_header('Range')
            raise IMDbDataAccessError, {'errcode': e.errno,
                                        'errmsg': str(e.strerror),
                                        'url': url,
                                        'proxy': self.get_proxy(),
                                        'exception type': 'IOError',
                                        'original exception': e}
        if encode is None:
            encode = 'latin_1'
            # The detection of the encoding is error prone...
            self._logger.warn('Unable to detect the encoding of the retrieved '
                        'page [%s]; falling back to default latin1.', encode)
        ##print unicode(content, encode, 'replace').encode('utf8')
        return unicode(content, encode, 'replace')

    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Handle HTTP errors: a 404 yields a fake empty page; any other
        error code raises IMDbDataAccessError."""
        if errcode == 404:
            self._logger.warn('404 code returned for %s: %s (headers: %s)',
                                url, errmsg, headers)
            return _FakeURLOpener(url, headers)
        raise IMDbDataAccessError, {'url': 'http:%s' % url,
                                    'errcode': errcode,
                                    'errmsg': errmsg,
                                    'headers': headers,
                                    'error type': 'http_error_default',
                                    'proxy': self.get_proxy()}

    def open_unknown(self, fullurl, data=None):
        """Refuse URLs with an unknown scheme."""
        raise IMDbDataAccessError, {'fullurl': fullurl,
                                    'data': str(data),
                                    'error type': 'open_unknown',
                                    'proxy': self.get_proxy()}

    def open_unknown_proxy(self, proxy, fullurl, data=None):
        """Refuse proxies with an unknown scheme."""
        raise IMDbDataAccessError, {'proxy': str(proxy),
                                    'fullurl': fullurl,
                                    'error type': 'open_unknown_proxy',
                                    'data': str(data)}
class IMDbHTTPAccessSystem(IMDbBase):
"""The class used to access IMDb's data through the web."""
accessSystem = 'http'
_http_logger = logging.getLogger('imdbpy.parser.http')
    def __init__(self, isThin=0, adultSearch=1, proxy=-1, oldParsers=False,
                 fallBackToNew=False, useModule=None, cookie_id=-1,
                 cookie_uu=None, *arguments, **keywords):
        """Initialize the access system.

        isThin: parse the lighter 'maindetails' pages instead of the
                full 'combined' ones.
        adultSearch: include adult titles in the search results.
        proxy: web proxy to use (-1 keeps the environment default).
        cookie_id/cookie_uu: account cookies (-1 keeps the defaults,
                cookie_id=None disables cookies altogether).
        """
        IMDbBase.__init__(self, *arguments, **keywords)
        self.urlOpener = IMDbURLopener()
        # When isThin is set, we're parsing the "maindetails" page
        # of a movie (instead of the "combined" page) and movie/person
        # references are not collected if no defaultModFunct is provided.
        self.isThin = isThin
        self._getRefs = True
        self._mdparse = False
        if isThin:
            if self.accessSystem == 'http':
                self.accessSystem = 'httpThin'
            self._mdparse = True
            if self._defModFunct is None:
                self._getRefs = False
                from imdb.utils import modNull
                self._defModFunct = modNull
        self.do_adult_search(adultSearch)
        # cookie_id == -1 means "keep the default cookies".
        if cookie_id != -1:
            if cookie_id is None:
                self.del_cookies()
            elif cookie_uu is not None:
                self.set_cookies(cookie_id, cookie_uu)
        if proxy != -1:
            self.set_proxy(proxy)
        if useModule is not None:
            if not isinstance(useModule, (list, tuple)) and ',' in useModule:
                useModule = useModule.split(',')
        # Default attributes set on every instanced parser.
        _def = {'_modFunct': self._defModFunct, '_as': self.accessSystem}
        # Proxy objects, used to lazily instantiate the parsers.
        self.smProxy = _ModuleProxy(searchMovieParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.spProxy = _ModuleProxy(searchPersonParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.scProxy = _ModuleProxy(searchCharacterParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.scompProxy = _ModuleProxy(searchCompanyParser, defaultKeys=_def,
                                       oldParsers=oldParsers, useModule=useModule,
                                       fallBackToNew=fallBackToNew)
        self.skProxy = _ModuleProxy(searchKeywordParser, defaultKeys=_def,
                                    oldParsers=oldParsers, useModule=useModule,
                                    fallBackToNew=fallBackToNew)
        self.mProxy = _ModuleProxy(movieParser, defaultKeys=_def,
                                   oldParsers=oldParsers, useModule=useModule,
                                   fallBackToNew=fallBackToNew)
        self.pProxy = _ModuleProxy(personParser, defaultKeys=_def,
                                   oldParsers=oldParsers, useModule=useModule,
                                   fallBackToNew=fallBackToNew)
        self.cProxy = _ModuleProxy(characterParser, defaultKeys=_def,
                                   oldParsers=oldParsers, useModule=useModule,
                                   fallBackToNew=fallBackToNew)
        self.compProxy = _ModuleProxy(companyParser, defaultKeys=_def,
                                      oldParsers=oldParsers, useModule=useModule,
                                      fallBackToNew=fallBackToNew)
        self.topBottomProxy = _ModuleProxy(topBottomParser, defaultKeys=_def,
                                           oldParsers=oldParsers, useModule=useModule,
                                           fallBackToNew=fallBackToNew)
    def _normalize_movieID(self, movieID):
        """Normalize the given movieID to the 7-digit zero-padded form;
        raise IMDbParserError if it's not convertible to an integer."""
        try:
            return '%07d' % int(movieID)
        except ValueError, e:
            raise IMDbParserError, 'invalid movieID "%s": %s' % (movieID, e)

    def _normalize_personID(self, personID):
        """Normalize the given personID to the 7-digit zero-padded form;
        raise IMDbParserError if it's not convertible to an integer."""
        try:
            return '%07d' % int(personID)
        except ValueError, e:
            raise IMDbParserError, 'invalid personID "%s": %s' % (personID, e)

    def _normalize_characterID(self, characterID):
        """Normalize the given characterID to the 7-digit zero-padded form;
        raise IMDbParserError if it's not convertible to an integer."""
        try:
            return '%07d' % int(characterID)
        except ValueError, e:
            raise IMDbParserError, 'invalid characterID "%s": %s' % \
                    (characterID, e)

    def _normalize_companyID(self, companyID):
        """Normalize the given companyID to the 7-digit zero-padded form;
        raise IMDbParserError if it's not convertible to an integer."""
        try:
            return '%07d' % int(companyID)
        except ValueError, e:
            raise IMDbParserError, 'invalid companyID "%s": %s' % \
                    (companyID, e)
    # The http access system uses IMDb's own identifiers directly, so
    # all the get_imdb*ID translators are identity functions.
    def get_imdbMovieID(self, movieID):
        """Translate a movieID in an imdbID; in this implementation
        the movieID _is_ the imdbID.
        """
        return movieID

    def get_imdbPersonID(self, personID):
        """Translate a personID in an imdbID; in this implementation
        the personID _is_ the imdbID.
        """
        return personID

    def get_imdbCharacterID(self, characterID):
        """Translate a characterID in an imdbID; in this implementation
        the characterID _is_ the imdbID.
        """
        return characterID

    def get_imdbCompanyID(self, companyID):
        """Translate a companyID in an imdbID; in this implementation
        the companyID _is_ the imdbID.
        """
        return companyID
    # Thin wrappers delegating proxy and cookie management to the
    # underlying IMDbURLopener instance.
    def get_proxy(self):
        """Return the used proxy or an empty string."""
        return self.urlOpener.get_proxy()

    def set_proxy(self, proxy):
        """Set the web proxy to use.

        It should be a string like 'http://localhost:8080/'; if the
        string is empty, no proxy will be used.
        If set, the value of the environment variable HTTP_PROXY is
        automatically used.
        """
        self.urlOpener.set_proxy(proxy)

    def set_cookies(self, cookie_id, cookie_uu):
        """Set a cookie to access an IMDb's account."""
        c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
        self.urlOpener.set_header('Cookie', c_header)

    def del_cookies(self):
        """Remove the used cookie."""
        self.urlOpener.del_header('Cookie')
def do_adult_search(self, doAdult,
cookie_id=_cookie_id, cookie_uu=_cookie_uu):
"""If doAdult is true, 'adult' movies are included in the
search results; cookie_id and cookie_uu are optional
parameters to select a specific account (see your cookie
or cookies.txt file."""
if doAdult:
self.set_cookies(cookie_id, cookie_uu)
#c_header = 'id=%s; uu=%s' % (cookie_id, cookie_uu)
#self.urlOpener.set_header('Cookie', c_header)
else:
self.urlOpener.del_header('Cookie')
    def _retrieve(self, url, size=-1):
        """Retrieve the given URL; if size is not -1, fetch at most
        the first `size` bytes."""
        ##print url
        self._http_logger.debug('fetching url %s (size: %d)', url, size)
        return self.urlOpener.retrieve_unicode(url, size=size)
    def _get_search_content(self, kind, ton, results):
        """Retrieve the web page for a given search.
        kind can be 'tt' (for titles), 'nm' (for names),
        'char' (for characters) or 'co' (for companies).
        ton is the title or the name to search.
        results is the maximum number of results to be retrieved."""
        if isinstance(ton, unicode):
            ton = ton.encode('utf-8')
        ##params = 'q=%s&%s=on&mx=%s' % (quote_plus(ton), kind, str(results))
        params = 's=%s;mx=%s;q=%s' % (kind, str(results), quote_plus(ton))
        if kind == 'ep':
            # Episode searches are title searches with a type filter.
            params = params.replace('s=ep;', 's=tt;ttype=ep;', 1)
        cont = self._retrieve(imdbURL_find % params)
        #print 'URL:', imdbURL_find % params
        if cont.find('Your search returned more than') == -1 or \
                cont.find("displayed the exact matches") == -1:
            return cont
        # The retrieved page contains no results, because too many
        # titles or names contain the string we're looking for.
        params = 's=%s;q=%s;lm=0' % (kind, quote_plus(ton))
        # NOTE(review): presumably an empirical estimate of the page
        # size needed to hold `results` matches — confirm if changed.
        size = 22528 + results * 512
        return self._retrieve(imdbURL_find % params, size=size)
def _search_movie(self, title, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'tt', 'q': title})
##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title})
##params = 'q=%s&tt=on&mx=%s' % (quote_plus(title), str(results))
##cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('tt', title, results)
return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
    def _search_episode(self, title, results):
        """Search for the given episode title; return the parsed matches."""
        t_dict = analyze_title(title)
        # If a full series/episode title was given, search only for the
        # episode title itself.
        if t_dict['kind'] == 'episode':
            title = t_dict['title']
        cont = self._get_search_content('ep', title, results)
        return self.smProxy.search_movie_parser.parse(cont, results=results)['data']
    def get_movie_main(self, movieID):
        """Retrieve the main information set for a movie; thin access
        systems parse the lighter 'maindetails' page instead of the
        full 'combined' page."""
        if not self.isThin:
            cont = self._retrieve(imdbURL_movie_main % movieID + 'combined')
        else:
            cont = self._retrieve(imdbURL_movie_main % movieID + 'maindetails')
        return self.mProxy.movie_parser.parse(cont, mdparse=self._mdparse)
    # Each of the following getters fetches a single page of the movie's
    # IMDb entry and delegates the parsing to the matching parser proxy.
    def get_movie_full_credits(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'fullcredits')
        return self.mProxy.movie_parser.parse(cont)

    def get_movie_plot(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'plotsummary')
        return self.mProxy.plot_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_awards(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'awards')
        return self.mProxy.movie_awards_parser.parse(cont)

    def get_movie_taglines(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'taglines')
        return self.mProxy.taglines_parser.parse(cont)

    def get_movie_keywords(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'keywords')
        return self.mProxy.keywords_parser.parse(cont)

    def get_movie_alternate_versions(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'alternateversions')
        return self.mProxy.alternateversions_parser.parse(cont,
                                                          getRefs=self._getRefs)

    def get_movie_crazy_credits(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'crazycredits')
        return self.mProxy.crazycredits_parser.parse(cont,
                                                     getRefs=self._getRefs)

    def get_movie_goofs(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'goofs')
        return self.mProxy.goofs_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_quotes(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'quotes')
        return self.mProxy.quotes_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_release_dates(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'releaseinfo')
        ret = self.mProxy.releasedates_parser.parse(cont)
        # The 'releaseinfo' page provides both information sets.
        ret['info sets'] = ('release dates', 'akas')
        return ret
    get_movie_akas = get_movie_release_dates

    def get_movie_vote_details(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'ratings')
        return self.mProxy.ratings_parser.parse(cont)

    def get_movie_official_sites(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'officialsites')
        return self.mProxy.officialsites_parser.parse(cont)

    def get_movie_trivia(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'trivia')
        return self.mProxy.trivia_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_connections(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'movieconnections')
        return self.mProxy.connections_parser.parse(cont)
    # More single-page getters; same fetch-then-parse pattern as above.
    def get_movie_technical(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'technical')
        return self.mProxy.tech_parser.parse(cont)

    def get_movie_business(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'business')
        return self.mProxy.business_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_literature(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'literature')
        return self.mProxy.literature_parser.parse(cont)

    def get_movie_locations(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'locations')
        return self.mProxy.locations_parser.parse(cont)

    def get_movie_soundtrack(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'soundtrack')
        return self.mProxy.soundtrack_parser.parse(cont)

    def get_movie_dvd(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'dvd')
        return self.mProxy.dvd_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_recommendations(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'recommendations')
        return self.mProxy.rec_parser.parse(cont)

    def get_movie_external_reviews(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'externalreviews')
        return self.mProxy.externalrev_parser.parse(cont)

    def get_movie_newsgroup_reviews(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'newsgroupreviews')
        return self.mProxy.newsgrouprev_parser.parse(cont)

    def get_movie_misc_sites(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'miscsites')
        return self.mProxy.misclinks_parser.parse(cont)

    def get_movie_sound_clips(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'soundsites')
        return self.mProxy.soundclips_parser.parse(cont)

    def get_movie_video_clips(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'videosites')
        return self.mProxy.videoclips_parser.parse(cont)

    def get_movie_photo_sites(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'photosites')
        return self.mProxy.photosites_parser.parse(cont)

    def get_movie_news(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'news')
        return self.mProxy.news_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_amazon_reviews(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'amazon')
        return self.mProxy.amazonrev_parser.parse(cont)

    def get_movie_guests(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'epcast')
        return self.mProxy.episodes_cast_parser.parse(cont)
    get_movie_episodes_cast = get_movie_guests

    def get_movie_merchandising_links(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'sales')
        return self.mProxy.sales_parser.parse(cont)
    def get_movie_episodes(self, movieID):
        """Retrieve the episodes list of a TV series, fixing up the
        'episode of' references and counting the episodes."""
        cont = self._retrieve(imdbURL_movie_main % movieID + 'episodes')
        data_d = self.mProxy.episodes_parser.parse(cont)
        # set movie['episode of'].movieID for every episode of the series.
        if data_d.get('data', {}).has_key('episodes'):
            nr_eps = 0
            for season in data_d['data']['episodes'].values():
                for episode in season.values():
                    episode['episode of'].movieID = movieID
                    nr_eps += 1
            # Number of episodes.
            if nr_eps:
                data_d['data']['number of episodes'] = nr_eps
        return data_d

    def get_movie_episodes_rating(self, movieID):
        """Retrieve the per-episode ratings of a TV series."""
        cont = self._retrieve(imdbURL_movie_main % movieID + 'epdate')
        data_d = self.mProxy.eprating_parser.parse(cont)
        # set movie['episode of'].movieID for every episode.
        if data_d.get('data', {}).has_key('episodes rating'):
            for item in data_d['data']['episodes rating']:
                episode = item['episode']
                episode['episode of'].movieID = movieID
        return data_d
    # Remaining single-page movie getters.
    def get_movie_faqs(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'faq')
        return self.mProxy.movie_faqs_parser.parse(cont, getRefs=self._getRefs)

    def get_movie_airing(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'tvschedule')
        return self.mProxy.airing_parser.parse(cont)
    get_movie_tv_schedule = get_movie_airing

    def get_movie_synopsis(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'synopsis')
        return self.mProxy.synopsis_parser.parse(cont)

    def get_movie_parents_guide(self, movieID):
        cont = self._retrieve(imdbURL_movie_main % movieID + 'parentalguide')
        return self.mProxy.parentsguide_parser.parse(cont)
def _search_person(self, name, results):
# The URL of the query.
# XXX: To retrieve the complete results list:
# params = urllib.urlencode({'more': 'nm', 'q': name})
##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name})
#params = 'q=%s&nm=on&mx=%s' % (quote_plus(name), str(results))
#cont = self._retrieve(imdbURL_find % params)
cont = self._get_search_content('nm', name, results)
return self.spProxy.search_person_parser.parse(cont, results=results)['data']
def get_person_main(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'maindetails')
ret = self.pProxy.maindetails_parser.parse(cont)
ret['info sets'] = ('main', 'filmography')
return ret
def get_person_filmography(self, personID):
return self.get_person_main(personID)
def get_person_biography(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'bio')
return self.pProxy.bio_parser.parse(cont, getRefs=self._getRefs)
def get_person_awards(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'awards')
return self.pProxy.person_awards_parser.parse(cont)
def get_person_other_works(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'otherworks')
return self.pProxy.otherworks_parser.parse(cont, getRefs=self._getRefs)
#def get_person_agent(self, personID):
# cont = self._retrieve(imdbURL_person_main % personID + 'agent')
# return self.pProxy.agent_parser.parse(cont)
def get_person_publicity(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'publicity')
return self.pProxy.publicity_parser.parse(cont)
def get_person_official_sites(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'officialsites')
return self.pProxy.person_officialsites_parser.parse(cont)
def get_person_news(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'news')
return self.pProxy.news_parser.parse(cont)
def get_person_episodes(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'filmoseries')
return self.pProxy.person_series_parser.parse(cont)
def get_person_merchandising_links(self, personID):
cont = self._retrieve(imdbURL_person_main % personID + 'forsale')
return self.pProxy.sales_parser.parse(cont)
def get_person_genres_links(self, personID):
    """Fetch and parse the filmography-by-genre page of the given person."""
    page = self._retrieve(imdbURL_person_main % personID + 'filmogenre')
    return self.pProxy.person_genres_parser.parse(page)
def get_person_keywords_links(self, personID):
    """Fetch and parse the filmography-by-keyword page of the given person."""
    page = self._retrieve(imdbURL_person_main % personID + 'filmokey')
    return self.pProxy.person_keywords_parser.parse(page)
def _search_character(self, name, results):
    """Search for the given character name; return a list of results."""
    page = self._get_search_content('char', name, results)
    parsed = self.scProxy.search_character_parser.parse(page, results=results)
    return parsed['data']
def get_character_main(self, characterID):
    """Fetch and parse the main page of the given character."""
    page = self._retrieve(imdbURL_character_main % characterID)
    result = self.cProxy.character_main_parser.parse(page)
    # The main page carries the filmography, too.
    result['info sets'] = ('main', 'filmography')
    return result
# The filmography comes from the very same page.
get_character_filmography = get_character_main
def get_character_biography(self, characterID):
    """Fetch and parse the biography page of the given character."""
    page = self._retrieve(imdbURL_character_main % characterID + 'bio')
    return self.cProxy.character_bio_parser.parse(page,
                                                  getRefs=self._getRefs)
def get_character_episodes(self, characterID):
    """Fetch and parse the episodes page of the given character."""
    url = imdbURL_character_main % characterID + 'filmoseries'
    page = self._retrieve(url)
    return self.cProxy.character_series_parser.parse(page)
def get_character_quotes(self, characterID):
    """Fetch and parse the quotes page of the given character."""
    page = self._retrieve(imdbURL_character_main % characterID + 'quotes')
    return self.cProxy.character_quotes_parser.parse(page,
                                                     getRefs=self._getRefs)
def _search_company(self, name, results):
    """Search for the given company name; return a list of results."""
    page = self._get_search_content('co', name, results)
    # The parser uses the last fetched url to detect direct hits.
    last_url = self.urlOpener._last_url
    parsed = self.scompProxy.search_company_parser.parse(page, url=last_url,
                                                         results=results)
    return parsed['data']
def get_company_main(self, companyID):
    """Fetch and parse the main page of the given company."""
    page = self._retrieve(imdbURL_company_main % companyID)
    return self.compProxy.company_main_parser.parse(page)
def _search_keyword(self, keyword, results):
    """Search for the given keyword; return a list of results
    (an empty list, if the search fails)."""
    # XXX: the IMDb web server seems to have some serious problem with
    #      non-ascii keyword.
    #      E.g.: http://akas.imdb.com/keyword/fianc%E9/
    #      will return a 500 Internal Server Error: Redirect Recursion.
    keyword = keyword.encode('utf8', 'ignore')
    try:
        page = self._get_search_content('kw', keyword, results)
    except IMDbDataAccessError:
        self._http_logger.warn('unable to search for keyword %s', keyword,
                               exc_info=True)
        return []
    parsed = self.skProxy.search_keyword_parser.parse(page, results=results)
    return parsed['data']
def _get_keyword(self, keyword, results):
    """Return a list of movies for the given keyword
    (an empty list, if the retrieval fails)."""
    # Non-ascii keywords are known to choke the web server.
    keyword = keyword.encode('utf8', 'ignore')
    try:
        page = self._retrieve(imdbURL_keyword_main % keyword)
    except IMDbDataAccessError:
        self._http_logger.warn('unable to get keyword %s', keyword,
                               exc_info=True)
        return []
    parsed = self.skProxy.search_moviekeyword_parser.parse(page,
                                                           results=results)
    return parsed['data']
def _get_top_bottom_movies(self, kind):
    """Return the list of the top 250 or bottom 100 movies; kind must
    be 'top' or 'bottom' (an empty list is returned otherwise)."""
    if kind == 'top':
        parser, url = self.topBottomProxy.top250_parser, imdbURL_top250
    elif kind == 'bottom':
        parser, url = self.topBottomProxy.bottom100_parser, imdbURL_bottom100
    else:
        return []
    return parser.parse(self._retrieve(url))['data']
| Python |
"""
parser.http.companyParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a company.
E.g., for "Columbia Pictures [us]" the referred page would be:
main details: http://akas.imdb.com/company/co0071509/
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from utils import build_movie, Attribute, Extractor, DOMParserBase, \
analyze_imdbid
from imdb.utils import analyze_company_name
class DOMCompanyParser(DOMParserBase):
    """Parser for the main page of a given company.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        cparser = DOMCompanyParser()
        result = cparser.parse(company_html_string)
    """
    # The extracted data contain Movie objects.
    _containsObjects = True

    extractors = [
            # Company name, taken from the page title (notes stripped).
            Extractor(label='name',
                        path="//title",
                        attrs=Attribute(key='name',
                            path="./text()",
                            postprocess=lambda x: \
                                    analyze_company_name(x, stripNotes=True))),

            # Filmography sections: every <b><a name=...> header groups the
            # following <ol> list of movies; the section name (lowercased)
            # becomes the dictionary key.
            Extractor(label='filmography',
                        group="//b/a[@name]",
                        group_key="./text()",
                        group_key_normalize=lambda x: x.lower(),
                        path="../following-sibling::ol[1]/li",
                        attrs=Attribute(key=None,
                            multi=True,
                            path={
                                'link': "./a[1]/@href",
                                'title': "./a[1]/text()",
                                'year': "./text()[1]"
                                },
                            # Title and year are merged before being parsed
                            # into a Movie object.
                            postprocess=lambda x:
                                build_movie(u'%s %s' % \
                                (x.get('title'), x.get('year').strip()),
                                movieID=analyze_imdbid(x.get('link') or u''),
                                _parsingCompany=True))),
            ]

    preprocessors = [
        # Close the previous paragraph before every section header, so
        # that the following-sibling XPath above works as expected.
        (re.compile('(<b><a name=)', re.I), r'</p>\1')
        ]

    def postprocess_data(self, data):
        """Pluralize the section names, to match the keys used elsewhere."""
        for key in data.keys():
            new_key = key.replace('company', 'companies')
            new_key = new_key.replace('other', 'miscellaneous')
            new_key = new_key.replace('distributor', 'distributors')
            if new_key != key:
                data[new_key] = data[key]
                del data[key]
        return data
# Parser classes exported by this module: the values are tuples of
# (parser classes, keyword arguments for the constructor).
_OBJECTS = {
    'company_main_parser': ((DOMCompanyParser,), None)
}
| Python |
"""
parser.http.characterParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a character.
E.g., for "Jesse James" the referred pages would be:
main details: http://www.imdb.com/character/ch0000001/
biography: http://www.imdb.com/character/ch0000001/bio
...and so on...
Copyright 2007-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from utils import Attribute, Extractor, DOMParserBase, build_movie, \
analyze_imdbid
from personParser import DOMHTMLMaindetailsParser
from imdb.Movie import Movie
_personIDs = re.compile(r'/name/nm([0-9]{7})')
class DOMHTMLCharacterMaindetailsParser(DOMHTMLMaindetailsParser):
    """Parser for the "filmography" page of a given character.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        bparser = DOMHTMLCharacterMaindetailsParser()
        result = bparser.parse(character_biography_html_string)
    """
    # The extracted data contain Movie objects.
    _containsObjects = True

    # Attributes shared by the two filmography extractors below; every
    # <li> item is turned into a Movie object, with the personIDs found
    # in the roleID links used as the person(s) playing this character.
    _film_attrs = [Attribute(key=None,
                      multi=True,
                      path={
                          'link': "./a[1]/@href",
                          'title': ".//text()",
                          'status': "./i/a//text()",
                          'roleID': "./a/@href"
                          },
                      postprocess=lambda x:
                          build_movie(x.get('title') or u'',
                              movieID=analyze_imdbid(x.get('link') or u''),
                              roleID=_personIDs.findall(x.get('roleID') or u''),
                              status=x.get('status') or None,
                              _parsingCharacter=True))]

    extractors = [
            # Character name, cleaned from the page title.
            Extractor(label='title',
                        path="//title",
                        attrs=Attribute(key='name',
                            path="./text()",
                            postprocess=lambda x: \
                                x.replace(' (Character)', '').replace(
                                    '- Filmography by type', '').strip())),

            Extractor(label='headshot',
                        path="//a[@name='headshot']",
                        attrs=Attribute(key='headshot',
                            path="./img/@src")),

            # Alternate names, separated by ' / '.
            Extractor(label='akas',
                        path="//div[h5='Alternate Names:']",
                        attrs=Attribute(key='akas',
                            path="./div//text()",
                            postprocess=lambda x: x.strip().split(' / '))),

            # Ungrouped filmography (divs without a section header).
            Extractor(label='filmography',
                        path="//div[@class='filmo'][not(h5)]/ol/li",
                        attrs=_film_attrs),

            # Filmography grouped by section; the header text (lowercased,
            # with the trailing colon removed) becomes the key.
            Extractor(label='filmography sections',
                        group="//div[@class='filmo'][h5]",
                        group_key="./h5/a/text()",
                        group_key_normalize=lambda x: x.lower()[:-1],
                        path="./ol/li",
                        attrs=_film_attrs),
            ]

    preprocessors = [
        # Strip extra information after a <br> inside list items.
        # Check that this doesn't cut "status"...
        (re.compile(r'<br>(\.\.\.| ).+?</li>', re.I | re.M), '</li>')]
class DOMHTMLCharacterBioParser(DOMParserBase):
    """Parser for the "biography" page of a given character.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        bparser = DOMHTMLCharacterBioParser()
        result = bparser.parse(character_biography_html_string)
    """
    # References to titles/names in the text are collected.
    _defGetRefs = True

    extractors = [
            # The introduction div is created by the preprocessors below.
            Extractor(label='introduction',
                        path="//div[@id='_intro']",
                        attrs=Attribute(key='introduction',
                            path=".//text()",
                            postprocess=lambda x: x.strip())),

            # Every biography entry is labeled with the text of the
            # preceding <h4> header; '||' markers (see preprocessors)
            # become paragraph separators.
            Extractor(label='biography',
                        path="//span[@class='_biography']",
                        attrs=Attribute(key='biography',
                            multi=True,
                            path={
                                'info': "./preceding-sibling::h4[1]//text()",
                                'text': ".//text()"
                            },
                            postprocess=lambda x: u'%s: %s' % (
                                x.get('info').strip(),
                                x.get('text').replace('\n',
                                    ' ').replace('||', '\n\n').strip()))),
    ]

    preprocessors = [
        # Mark the introduction section with a parsable div.
        (re.compile('(<div id="swiki.2.3.1">)', re.I), r'\1<div id="_intro">'),
        (re.compile('(<a name="history">)\s*(<table .*?</table>)',
                    re.I | re.DOTALL),
         r'</div>\2\1</a>'),
        # Wrap every biography entry in a span with a known class.
        (re.compile('(<a name="[^"]+">)(<h4>)', re.I), r'</span>\1</a>\2'),
        (re.compile('(</h4>)</a>', re.I), r'\1<span class="_biography">'),
        # Turn paragraph breaks into '||' markers.
        (re.compile('<br/><br/>', re.I), r'||'),
        (re.compile('\|\|\n', re.I), r'</span>'),
    ]
class DOMHTMLCharacterQuotesParser(DOMParserBase):
    """Parser for the "quotes" page of a given character.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        qparser = DOMHTMLCharacterQuotesParser()
        result = qparser.parse(character_quotes_html_string)
    """
    # References to titles/names in the text are collected.
    _defGetRefs = True

    extractors = [
        # Quotes are grouped by movie title (the <h5> text); the value is
        # a (movieID, list of quotes) tuple; '||' markers, introduced by
        # the preprocessors below, separate distinct quotes.
        Extractor(label='charquotes',
                    group="//h5",
                    group_key="./a/text()",
                    path="./following-sibling::div[1]",
                    attrs=Attribute(key=None,
                        path={'txt': ".//text()",
                              'movieID': ".//a[1]/@href"},
                        postprocess=lambda x: (analyze_imdbid(x['movieID']),
                                    x['txt'].strip().replace(': ',
                                    ': ').replace(': ', ': ').split('||'))))
    ]

    preprocessors = [
        # Wrap the quotes of each movie in a div.
        (re.compile('(</h5>)', re.I), r'\1<div>'),
        # '||' separates quotes; '::' separates lines within a quote.
        (re.compile('\s*<br/><br/>\s*', re.I), r'||'),
        (re.compile('\|\|\s*(<hr/>)', re.I), r'</div>\1'),
        (re.compile('\s*<br/>\s*', re.I), r'::')
    ]

    def postprocess_data(self, data):
        """Re-key the parsed quotes on Movie objects (built from the
        collected movieIDs), splitting every quote into its lines."""
        if not data:
            return {}
        newData = {}
        for title in data:
            movieID, quotes = data[title]
            if movieID is None:
                # No movieID found: keep the bare title as the key.
                movie = title
            else:
                movie = Movie(title=title, movieID=movieID,
                              accessSystem=self._as, modFunct=self._modFunct)
            newData[movie] = [quote.split('::') for quote in quotes]
        return {'quotes': newData}
from personParser import DOMHTMLSeriesParser
# Parser classes exported by this module: the values are tuples of
# (parser classes, keyword arguments for the constructor).
_OBJECTS = {
    'character_main_parser': ((DOMHTMLCharacterMaindetailsParser,),
                              {'kind': 'character'}),
    'character_series_parser': ((DOMHTMLSeriesParser,), None),
    'character_bio_parser': ((DOMHTMLCharacterBioParser,), None),
    'character_quotes_parser': ((DOMHTMLCharacterQuotesParser,), None)
}
| Python |
"""
parser.http.searchCharacterParser module (imdb package).
This module provides the HTMLSearchCharacterParser class (and the
search_character_parser instance), used to parse the results of a search
for a given character.
E.g., when searching for the name "Jesse James", the parsed page would be:
http://akas.imdb.com/find?s=Characters;mx=20;q=Jesse+James
Copyright 2007-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCharacterParser(DOMBasicMovieParser):
    """Simply get the name of a character and the imdbID.
    It's used by the DOMHTMLSearchCharacterParser class to return a result
    for a direct match (when a search on IMDb results in a single
    character, the web server sends directly the character page."""
    # Character names are not parsed in canonical "Surname, Name" form.
    _titleFunct = lambda self, x: analyze_name(x or u'', canonical=False)
class DOMHTMLSearchCharacterParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, for character searches."""
    # Parser used for direct hits.
    _BaseParser = DOMBasicCharacterParser
    # Title prefix marking a list of results (i.e.: NOT a direct hit).
    _notDirectHitTitle = '<title>imdb search'
    _titleBuilder = lambda self, x: build_name(x, canonical=False)
    _linkPrefix = '/character/ch'

    # Every result is a (characterID, {'name': ...}) tuple.
    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'name': "./a[1]/text()"
                            },
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link') or u''),
                            {'name': x.get('name')}
                        ))]
    extractors = [Extractor(label='search',
                        path="//td[3]/a[starts-with(@href, " \
                                "'/character/ch')]/..",
                        attrs=_attrs)]
# Parser classes exported by this module: the values are tuples of
# (parser classes, keyword arguments for the constructor).
_OBJECTS = {
    'search_character_parser': ((DOMHTMLSearchCharacterParser,),
                {'kind': 'character', '_basic_parser': DOMBasicCharacterParser})
}
| Python |
"""
parser.mobile package (imdb package).
This package provides the IMDbMobileAccessSystem class used to access
IMDb's data for mobile systems.
the imdb.IMDb function will return an instance of this class when
called with the 'accessSystem' argument set to "mobile".
Copyright 2005-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import logging
from urllib import unquote
from imdb import imdbURL_movie_main, imdbURL_person_main, imdbURL_character_main
from imdb.Movie import Movie
from imdb.utils import analyze_title, analyze_name, canonicalName, \
date_and_notes
from imdb._exceptions import IMDbDataAccessError
from imdb.parser.http import IMDbHTTPAccessSystem
from imdb.parser.http.utils import subXMLRefs, subSGMLRefs, build_person, \
build_movie, re_spaces
# XXX NOTE: the first version of this module was heavily based on
# regular expressions. This new version replace regexps with
# find() strings' method calls; despite being less flexible, it
# seems to be at least as fast and, hopefully, much more
# lightweight. Yes: the regexp-based version was too heavyweight
# for systems with very limited CPU power and memory footprint.
# Bound method, kept at module level to avoid repeated attribute lookups.
re_spacessub = re_spaces.sub
# Strip html tags.
re_unhtml = re.compile(r'<.+?>')
re_unhtmlsub = re_unhtml.sub
# imdb person, movie or character ids (7 digits after a nm/tt/ch prefix).
re_imdbID = re.compile(r'(?<=nm|tt|ch)([0-9]{7})\b')
# movie AKAs.
re_makas = re.compile('(<p class="find-aka">.*?</p>)')
# Remove episode numbers.
re_filmo_episodes = re.compile('<div class="filmo-episodes">.*?</div>',
                               re.M | re.I)
def _unHtml(s):
    """Return a string without tags and no multiple spaces."""
    # Strip tags first, then collapse whitespace runs, then expand
    # SGML character references.
    untagged = re_unhtmlsub('', s)
    collapsed = re_spacessub(' ', untagged).strip()
    return subSGMLRefs(collapsed)
# The int type, used to tell a parsed 0 apart from a missing value.
_inttype = type(0)
def _getTagsWith(s, cont, toClosure=False, maxRes=None):
"""Return the html tags in the 's' string containing the 'cont'
string; if toClosure is True, everything between the opening
tag and the closing tag is returned."""
lres = []
bi = s.find(cont)
if bi != -1:
btag = s[:bi].rfind('<')
if btag != -1:
if not toClosure:
etag = s[bi+1:].find('>')
if etag != -1:
endidx = bi+2+etag
lres.append(s[btag:endidx])
if maxRes is not None and len(lres) >= maxRes: return lres
lres += _getTagsWith(s[endidx:], cont,
toClosure=toClosure)
else:
spaceidx = s[btag:].find(' ')
if spaceidx != -1:
ctag = '</%s>' % s[btag+1:btag+spaceidx]
closeidx = s[bi:].find(ctag)
if closeidx != -1:
endidx = bi+closeidx+len(ctag)
lres.append(s[btag:endidx])
if maxRes is not None and len(lres) >= maxRes:
return lres
lres += _getTagsWith(s[endidx:], cont,
toClosure=toClosure)
return lres
def _findBetween(s, begins, ends, beginindx=0, maxRes=None, lres=None):
"""Return the list of strings from the 's' string which are included
between the 'begins' and 'ends' strings."""
if lres is None:
lres = []
bi = s.find(begins, beginindx)
if bi != -1:
lbegins = len(begins)
if isinstance(ends, (list, tuple)):
eset = [s.find(end, bi+lbegins) for end in ends]
eset[:] = [x for x in eset if x != -1]
if not eset: ei = -1
else: ei = min(eset)
else:
ei = s.find(ends, bi+lbegins)
if ei != -1:
match = s[bi+lbegins:ei]
lres.append(match)
if maxRes is not None and len(lres) >= maxRes: return lres
_findBetween(s, begins, ends, beginindx=ei, maxRes=maxRes,
lres=lres)
return lres
class IMDbMobileAccessSystem(IMDbHTTPAccessSystem):
"""The class used to access IMDb's data through the web for
mobile terminals."""
accessSystem = 'mobile'
_mobile_logger = logging.getLogger('imdbpy.parser.mobile')
def __init__(self, isThin=1, *arguments, **keywords):
    """Initialize the mobile access system; by default it behaves
    like a "thin" http client."""
    self.accessSystem = 'mobile'
    IMDbHTTPAccessSystem.__init__(self, isThin, *arguments, **keywords)
def _clean_html(self, html):
    """Normalize the retrieve html."""
    # Collapse runs of whitespace into single spaces.
    html = re_spaces.sub(' ', html)
    # Remove silly » chars.
    return subXMLRefs(html.replace(' »', ''))
def _mretrieve(self, url, size=-1):
    """Retrieve an html page and normalize it."""
    return self._clean_html(self._retrieve(url, size=size))
def _getPersons(self, s, sep='<br/>'):
    """Return a list of Person objects, from the string s; items
    are assumed to be separated by the sep string."""
    names = s.split(sep)
    pl = []
    plappend = pl.append
    # Billing position of the next person found.
    counter = 1
    for name in names:
        pid = re_imdbID.findall(name)
        # Skip items without a personID.
        if not pid: continue
        # Character(s) played by this person, if any.
        characters = _getTagsWith(name, 'class="char"',
                                  toClosure=True, maxRes=1)
        chpids = []
        if characters:
            # Multiple characters are separated by ' / '; entries
            # without a characterID are kept as None.
            for ch in characters[0].split(' / '):
                chid = re_imdbID.findall(ch)
                if not chid:
                    chpids.append(None)
                else:
                    chpids.append(chid[-1])
        if not chpids:
            chpids = None
        elif len(chpids) == 1:
            chpids = chpids[0]
        name = _unHtml(name)
        # Catch unclosed tags.
        gt_indx = name.find('>')
        if gt_indx != -1:
            name = name[gt_indx+1:].lstrip()
        if not name: continue
        if name.endswith('...'):
            name = name[:-3]
        p = build_person(name, personID=str(pid[0]), billingPos=counter,
                         modFunct=self._defModFunct, roleID=chpids,
                         accessSystem=self.accessSystem)
        plappend(p)
        counter += 1
    return pl
def _search_movie(self, title, results):
    """Search for the given title; return a list of
    (movieID, {title data}) tuples."""
    ##params = urllib.urlencode({'tt': 'on','mx': str(results),'q': title})
    ##params = 'q=%s&tt=on&mx=%s' % (urllib.quote_plus(title), str(results))
    ##cont = self._mretrieve(imdbURL_search % params)
    cont = subXMLRefs(self._get_search_content('tt', title, results))
    title = _findBetween(cont, '<title>', '</title>', maxRes=1)
    res = []
    if not title:
        self._mobile_logger.error('no title tag searching for movie %s',
                                  title)
        return res
    tl = title[0].lower()
    if not tl.startswith('imdb title'):
        # a direct hit!
        title = _unHtml(title[0])
        mid = None
        # The movieID is taken from the canonical url.
        midtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1)
        if midtag:
            mid = _findBetween(midtag[0], '/title/tt', '/', maxRes=1)
        if not (mid and title):
            self._mobile_logger.error('no direct hit title/movieID for' \
                                      ' title %s', title)
            return res
        if cont.find('<span class="tv-extra">TV mini-series</span>') != -1:
            title += ' (mini)'
        res[:] = [(str(mid[0]), analyze_title(title))]
    else:
        # A list of results.
        # XXX: this results*3 prevents some recursion errors, but...
        # it's not exactly understandable (i.e.: why 'results' is
        # not enough to get all the results?)
        lis = _findBetween(cont, 'td valign="top">', '</td>',
                           maxRes=results*3)
        for li in lis:
            # Collect and clean the AKA titles of this result.
            akas = re_makas.findall(li)
            for idx, aka in enumerate(akas):
                aka = aka.replace('" - ', '::', 1)
                aka = _unHtml(aka)
                if aka.startswith('aka "'):
                    aka = aka[5:].strip()
                    # NOTE(review): assumes aka is non-empty here and the
                    # trailing quote matches the stripped leading one.
                    if aka[-1] == '"':
                        aka = aka[:-1]
                akas[idx] = aka
            imdbid = re_imdbID.findall(li)
            # Remove the AKAs before parsing the title itself.
            li = re_makas.sub('', li)
            mtitle = _unHtml(li)
            if not (imdbid and mtitle):
                self._mobile_logger.debug('no title/movieID parsing' \
                                          ' %s searching for title %s', li,
                                          title)
                continue
            mtitle = mtitle.replace('(TV mini-series)', '(mini)')
            resd = analyze_title(mtitle)
            if akas:
                resd['akas'] = akas
            res.append((str(imdbid[0]), resd))
    return res
def get_movie_main(self, movieID):
    """Fetch the maindetails page of the given movie and scrape every
    relevant information into a dictionary, returned as {'data': d}.

    Raises IMDbDataAccessError if the page has no title tag.
    """
    cont = self._mretrieve(imdbURL_movie_main % movieID + 'maindetails')
    title = _findBetween(cont, '<title>', '</title>', maxRes=1)
    if not title:
        raise IMDbDataAccessError, 'unable to get movieID "%s"' % movieID
    title = _unHtml(title[0])
    if cont.find('<span class="tv-extra">TV mini-series</span>') != -1:
        title += ' (mini)'
    d = analyze_title(title)
    kind = d.get('kind')
    # If this is an episode, collect the series it belongs to.
    tv_series = _findBetween(cont, 'TV Series:</h5>', '</a>', maxRes=1)
    if tv_series: mid = re_imdbID.findall(tv_series[0])
    else: mid = None
    if tv_series and mid:
        s_title = _unHtml(tv_series[0])
        s_data = analyze_title(s_title)
        m = Movie(movieID=str(mid[0]), data=s_data,
                  accessSystem=self.accessSystem,
                  modFunct=self._defModFunct)
        d['kind'] = kind = u'episode'
        d['episode of'] = m
    # Series years, from the <h1> header.
    if kind in ('tv series', 'tv mini series'):
        years = _findBetween(cont, '<h1>', '</h1>', maxRes=1)
        if years:
            years[:] = _findBetween(years[0], 'TV series', '</span>',
                                    maxRes=1)
            if years:
                d['series years'] = years[0].strip()
    # Original air date, season and episode numbers.
    air_date = _findBetween(cont, 'Original Air Date:</h5>', '</div>',
                            maxRes=1)
    if air_date:
        air_date = air_date[0]
        vi = air_date.find('(')
        if vi != -1:
            date = _unHtml(air_date[:vi]).strip()
            if date != '????':
                d['original air date'] = date
            air_date = air_date[vi:]
            season = _findBetween(air_date, 'Season', ',', maxRes=1)
            if season:
                season = season[0].strip()
                try: season = int(season)
                except: pass
                # A season number of 0 is still a valid value.
                if season or type(season) is _inttype:
                    d['season'] = season
            episode = _findBetween(air_date, 'Episode', ')', maxRes=1)
            if episode:
                episode = episode[0].strip()
                try: episode = int(episode)
                except: pass
                if episode or type(season) is _inttype:
                    d['episode'] = episode
    # Director(s).
    direct = _findBetween(cont, '<h5>Director', ('</div>', '<br/> <br/>'),
                          maxRes=1)
    if direct:
        direct = direct[0]
        h5idx = direct.find('/h5>')
        if h5idx != -1:
            direct = direct[h5idx+4:]
        direct = self._getPersons(direct)
        if direct: d['director'] = direct
    if kind in ('tv series', 'tv mini series', 'episode'):
        if kind != 'episode':
            seasons = _findBetween(cont, 'Seasons:</h5>', '</div>',
                                   maxRes=1)
            if seasons:
                d['number of seasons'] = seasons[0].count('|') + 1
        creator = _findBetween(cont, 'Created by</h5>', ('class="tn15more"',
                                                         '</div>',
                                                         '<br/> <br/>'),
                                                         maxRes=1)
        if not creator:
            # They change 'Created by' to 'Creator' and viceversa
            # from time to time...
            # XXX: is 'Creators' also used?
            creator = _findBetween(cont, 'Creator:</h5>',
                                   ('class="tn15more"', '</div>',
                                    '<br/> <br/>'), maxRes=1)
        if creator:
            creator = creator[0]
            if creator.find('tn15more'): creator = '%s>' % creator
            creator = self._getPersons(creator)
            if creator: d['creator'] = creator
    # Writer(s).
    writers = _findBetween(cont, '<h5>Writer', ('</div>', '<br/> <br/>'),
                           maxRes=1)
    if writers:
        writers = writers[0]
        h5idx = writers.find('/h5>')
        if h5idx != -1:
            writers = writers[h5idx+4:]
        writers = self._getPersons(writers)
        if writers: d['writer'] = writers
    # Poster url.
    cvurl = _getTagsWith(cont, 'name="poster"', toClosure=True, maxRes=1)
    if cvurl:
        cvurl = _findBetween(cvurl[0], 'src="', '"', maxRes=1)
        if cvurl: d['cover url'] = cvurl[0]
    genres = _findBetween(cont, 'href="/Sections/Genres/', '/')
    if genres:
        # Duplicates are removed.
        d['genres'] = list(set(genres))
    # Rating and number of votes.
    ur = _findBetween(cont, '<div class="starbar-meta">', '</div>',
                      maxRes=1)
    if ur:
        rat = _findBetween(ur[0], '<b>', '</b>', maxRes=1)
        if rat:
            teni = rat[0].find('/10')
            if teni != -1:
                rat = rat[0][:teni]
                try:
                    rat = float(rat.strip())
                    d['rating'] = rat
                except ValueError:
                    self._mobile_logger.warn('wrong rating: %s', rat)
        vi = ur[0].rfind('tn15more">')
        if vi != -1 and ur[0][vi+10:].find('await') == -1:
            try:
                votes = _unHtml(ur[0][vi+10:]).replace('votes', '').strip()
                votes = int(votes.replace(',', ''))
                d['votes'] = votes
            except ValueError:
                self._mobile_logger.warn('wrong votes: %s', ur)
    # Top 250 rank, from the fragment of the chart link.
    top250 = _findBetween(cont, 'href="/chart/top?', '</a>', maxRes=1)
    if top250:
        fn = top250[0].rfind('#')
        if fn != -1:
            try:
                td = int(top250[0][fn+1:])
                d['top 250 rank'] = td
            except ValueError:
                self._mobile_logger.warn('wrong top250: %s', top250)
    # Cast: try every known section header, in order.
    castdata = _findBetween(cont, 'Cast overview', '</table>', maxRes=1)
    if not castdata:
        castdata = _findBetween(cont, 'Credited cast', '</table>', maxRes=1)
    if not castdata:
        castdata = _findBetween(cont, 'Complete credited cast', '</table>',
                                maxRes=1)
    if not castdata:
        castdata = _findBetween(cont, 'Series Cast Summary', '</table>',
                                maxRes=1)
    if not castdata:
        castdata = _findBetween(cont, 'Episode Credited cast', '</table>',
                                maxRes=1)
    if castdata:
        castdata = castdata[0]
        # Reintegrate the fist tag.
        fl = castdata.find('href=')
        if fl != -1: castdata = '<a ' + castdata[fl:]
        # Exclude the 'rest of cast listed alphabetically' row.
        smib = castdata.find('<tr><td align="center" colspan="4"><small>')
        if smib != -1:
            smie = castdata.rfind('</small></td></tr>')
            if smie != -1:
                castdata = castdata[:smib].strip() + \
                           castdata[smie+18:].strip()
        castdata = castdata.replace('/tr> <tr', '/tr><tr')
        cast = self._getPersons(castdata, sep='</tr><tr')
        if cast: d['cast'] = cast
    akas = _findBetween(cont, 'Also Known As:</h5>', '</div>', maxRes=1)
    if akas:
        # For some reason, here <br> is still used in place of <br/>.
        akas[:] = [x for x in akas[0].split('<br>') if x.strip()]
        akas = [_unHtml(x).replace('" - ','::', 1).lstrip('"').strip()
                for x in akas]
        if 'See more' in akas: akas.remove('See more')
        akas[:] = [x for x in akas if x]
        if akas:
            d['akas'] = akas
    mpaa = _findBetween(cont, 'MPAA</a>:', '</div>', maxRes=1)
    if mpaa: d['mpaa'] = _unHtml(mpaa[0])
    runtimes = _findBetween(cont, 'Runtime:</h5>', '</div>', maxRes=1)
    if runtimes:
        runtimes = runtimes[0]
        # Notes in parentheses become '::(note)' suffixes.
        runtimes = [x.strip().replace(' min', '').replace(' (', '::(', 1)
                    for x in runtimes.split('|')]
        d['runtimes'] = [_unHtml(x).strip() for x in runtimes]
    if kind == 'episode':
        # number of episodes.
        epsn = _findBetween(cont, 'title="Full Episode List">', '</a>',
                            maxRes=1)
        if epsn:
            epsn = epsn[0].replace(' Episodes', '').strip()
            if epsn:
                try:
                    epsn = int(epsn)
                except:
                    self._mobile_logger.warn('wrong episodes #: %s', epsn)
                d['number of episodes'] = epsn
    country = _findBetween(cont, 'Country:</h5>', '</div>', maxRes=1)
    if country:
        country[:] = country[0].split(' | ')
        country[:] = ['<a %s' % x for x in country if x]
        country[:] = [_unHtml(x.replace(' <i>', '::')) for x in country]
        if country: d['countries'] = country
    lang = _findBetween(cont, 'Language:</h5>', '</div>', maxRes=1)
    if lang:
        lang[:] = lang[0].split(' | ')
        lang[:] = ['<a %s' % x for x in lang if x]
        lang[:] = [_unHtml(x.replace(' <i>', '::')) for x in lang]
        if lang: d['languages'] = lang
    col = _findBetween(cont, '"/search/title?colors=', '</div>')
    if col:
        col[:] = col[0].split(' | ')
        col[:] = ['<a %s' % x for x in col if x]
        col[:] = [_unHtml(x.replace(' <i>', '::')) for x in col]
        if col: d['color info'] = col
    sm = _findBetween(cont, '/search/title?sound_mixes=', '</div>',
                      maxRes=1)
    if sm:
        sm[:] = sm[0].split(' | ')
        sm[:] = ['<a %s' % x for x in sm if x]
        sm[:] = [_unHtml(x.replace(' <i>', '::')) for x in sm]
        if sm: d['sound mix'] = sm
    cert = _findBetween(cont, 'Certification:</h5>', '</div>', maxRes=1)
    if cert:
        cert[:] = cert[0].split(' | ')
        cert[:] = [_unHtml(x.replace(' <i>', '::')) for x in cert]
        if cert: d['certificates'] = cert
    plotoutline = _findBetween(cont, 'Plot:</h5>', ['<a ', '</div>'],
                               maxRes=1)
    if plotoutline:
        plotoutline = plotoutline[0].strip()
        plotoutline = plotoutline.rstrip('|').rstrip()
        if plotoutline: d['plot outline'] = _unHtml(plotoutline)
    aratio = _findBetween(cont, 'Aspect Ratio:</h5>', ['<a ', '</div>'],
                          maxRes=1)
    if aratio:
        aratio = aratio[0].strip().replace(' (', '::(', 1)
        if aratio:
            d['aspect ratio'] = _unHtml(aratio)
    return {'data': d}
def get_movie_plot(self, movieID):
    """Fetch and parse the plot summaries page of the given movie.

    Return a dictionary with a 'plot' list inside 'data' (an empty
    'data' dictionary, if no summary is found); every summary is in
    the 'text::author' format.
    """
    cont = self._mretrieve(imdbURL_movie_main % movieID + 'plotsummary')
    plot = _findBetween(cont, '<p class="plotpar">', '</p>')
    plot[:] = [_unHtml(x) for x in plot]
    # NOTE: enumerate replaces the original 'for i in xrange(len(plot))'
    # index loop; it's more idiomatic and not tied to Python 2's xrange.
    for i, p in enumerate(plot):
        wbyidx = p.rfind(' Written by ')
        if wbyidx != -1:
            # Separate the summary text from its author; curly braces
            # stand for html tags in the author's notes.
            plot[i] = '%s::%s' % \
                (p[:wbyidx].rstrip(),
                p[wbyidx+12:].rstrip().replace('{','<').replace('}','>'))
    if plot: return {'data': {'plot': plot}}
    return {'data': {}}
def _search_person(self, name, results):
    """Search for the given name; return a list of
    (personID, {name data}) tuples."""
    ##params = urllib.urlencode({'nm': 'on', 'mx': str(results), 'q': name})
    ##params = 'q=%s&nm=on&mx=%s' % (urllib.quote_plus(name), str(results))
    ##cont = self._mretrieve(imdbURL_search % params)
    cont = subXMLRefs(self._get_search_content('nm', name, results))
    name = _findBetween(cont, '<title>', '</title>', maxRes=1)
    res = []
    if not name:
        self._mobile_logger.warn('no title tag searching for name %s', name)
        return res
    nl = name[0].lower()
    if not nl.startswith('imdb name'):
        # a direct hit!
        name = _unHtml(name[0])
        name = name.replace('- Filmography by type' , '').strip()
        pid = None
        # The personID is taken from the canonical url.
        pidtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1)
        if pidtag:
            pid = _findBetween(pidtag[0], '/name/nm', '/', maxRes=1)
        if not (pid and name):
            self._mobile_logger.error('no direct hit name/personID for' \
                                      ' name %s', name)
            return res
        res[:] = [(str(pid[0]), analyze_name(name, canonical=1))]
    else:
        # A list of results; results*3 matches what _search_movie does.
        lis = _findBetween(cont, 'td valign="top">', '</td>',
                           maxRes=results*3)
        for li in lis:
            akas = _findBetween(li, '<em>"', '"</em>')
            # Cut away every trailing aka/birth name information.
            for sep in ['<small', '<br> aka', '<br> birth name']:
                sepIdx = li.find(sep)
                if sepIdx != -1:
                    li = li[:sepIdx]
            pid = re_imdbID.findall(li)
            pname = _unHtml(li)
            if not (pid and pname):
                self._mobile_logger.debug('no name/personID parsing' \
                                          ' %s searching for name %s', li,
                                          name)
                continue
            resd = analyze_name(pname, canonical=1)
            if akas:
                resd['akas'] = akas
            res.append((str(pid[0]), resd))
    return res
def get_person_main(self, personID, _parseChr=False):
    """Fetch and parse the "maindetails" page of a person (or, when
    _parseChr is true, the main page of a character) and return a
    dict in the form {'data': ..., 'info sets': ...}.

    The 'data' mapping contains name, birth/death date and notes,
    akas, headshot URL and the filmography, one list of Movie
    objects per section.
    Raises IMDbDataAccessError when the page has no <title> tag.
    """
    if not _parseChr:
        url = imdbURL_person_main % personID + 'maindetails'
    else:
        url = imdbURL_character_main % personID
    s = self._mretrieve(url)
    r = {}
    name = _findBetween(s, '<title>', '</title>', maxRes=1)
    if not name:
        # No <title>: the page is unusable.
        if _parseChr: w = 'characterID'
        else: w = 'personID'
        raise IMDbDataAccessError, 'unable to get %s "%s"' % (w, personID)
    # Strip site decorations from the page title to get the bare name.
    name = _unHtml(name[0].replace(' - IMDb', ''))
    if _parseChr:
        name = name.replace('(Character)', '').strip()
        name = name.replace('- Filmography by type', '').strip()
    else:
        name = name.replace('- Filmography by', '').strip()
    # Person names are stored canonical ("Surname, Name"); characters not.
    r = analyze_name(name, canonical=not _parseChr)
    # Birth and death information ("Born:" / "Died:" headers).
    for dKind in ('Born', 'Died'):
        date = _findBetween(s, '%s:</h4>' % dKind.capitalize(),
                            ('<div class', '</div>', '<br/><br/>'), maxRes=1)
        if date:
            date = _unHtml(date[0])
            if date:
                #date, notes = date_and_notes(date)
                # TODO: fix to handle real names.
                # Split "DATE in PLACE" into date and notes.
                date_notes = date.split(' in ', 1)
                notes = u''
                date = date_notes[0]
                if len(date_notes) == 2:
                    notes = date_notes[1]
                dtitle = 'birth'
                if dKind == 'Died':
                    dtitle = 'death'
                if date:
                    r['%s date' % dtitle] = date
                if notes:
                    r['%s notes' % dtitle] = notes
    # Alternate names; separated either by ' | ' or by ' / '.
    akas = _findBetween(s, 'Alternate Names:</h5>', ('</div>',
                        '<br/><br/>'), maxRes=1)
    if akas:
        akas = akas[0]
        if akas.find(' | ') != -1:
            akas = _unHtml(akas).split(' | ')
        else:
            akas = _unHtml(akas).split(' / ')
    if akas: r['akas'] = akas
    # Headshot image URL (src of the "headshot" anchor's <img>).
    hs = _findBetween(s, 'name="headshot"', '</a>', maxRes=1)
    if hs:
        hs[:] = _findBetween(hs[0], 'src="', '"', maxRes=1)
        if hs: r['headshot'] = hs[0]
    # Build a list of tuples such [('hrefLink', 'section name')]
    workkind = _findBetween(s, 'id="jumpto_', '</a>')
    ws = []
    for work in workkind:
        # The anchor separator appears both as '" >' and '">'.
        sep = '" >'
        if '">' in work:
            sep = '">'
        wsplit = work.split(sep, 1)
        if len(wsplit) == 2:
            sect = wsplit[0]
            if '"' in sect:
                sect = sect[:sect.find('"')]
            ws.append((sect, wsplit[1].lower()))
    # XXX: I think "guest appearances" are gone.
    if s.find('<a href="#guest-appearances"') != -1:
        ws.append(('guest-appearances', 'notable tv guest appearances'))
    #if _parseChr:
    #    ws.append(('filmography', 'filmography'))
    # Parse every filmography section found above.
    for sect, sectName in ws:
        raws = u''
        # Everything between the current section link and the end
        # of the <ol> tag.
        if _parseChr and sect == 'filmography':
            inisect = s.find('<div class="filmo">')
        else:
            inisect = s.find('<a name="%s' % sect)
        if inisect != -1:
            endsect = s[inisect:].find('<div id="filmo-head-')
            if endsect != -1: raws = s[inisect:inisect+endsect]
        if not raws: continue
        mlist = _findBetween(raws, '<div class="filmo-row',
                             ('<div class="clear"/>',))
        for m in mlist:
            # Skip the opening tag's attributes.
            fCB = m.find('>')
            if fCB != -1:
                m = m[fCB+1:].lstrip()
            m = re_filmo_episodes.sub('', m)
            # For every movie in the current section.
            movieID = re_imdbID.findall(m)
            if not movieID:
                self._mobile_logger.debug('no movieID in %s', m)
                continue
            # The role/character is introduced by ' .... ' (persons)
            # or by ' Played by ' (characters).
            m = m.replace('<br/>', ' .... ', 1)
            if not _parseChr:
                chrIndx = m.find(' .... ')
            else:
                chrIndx = m.find(' Played by ')
            chids = []
            if chrIndx != -1:
                chrtxt = m[chrIndx+6:]
                if _parseChr:
                    chrtxt = chrtxt[5:]
                # Multiple roles are separated by ' / '; keep a
                # characterID (or None) for each of them.
                for ch in chrtxt.split(' / '):
                    chid = re_imdbID.findall(ch)
                    if not chid:
                        chids.append(None)
                    else:
                        chids.append(chid[-1])
            if not chids:
                chids = None
            elif len(chids) == 1:
                chids = chids[0]
            movieID = str(movieID[0])
            # Search the status.
            stidx = m.find('<i>')
            status = u''
            if stidx != -1:
                stendidx = m.rfind('</i>')
                if stendidx != -1:
                    status = _unHtml(m[stidx+3:stendidx])
                    m = m.replace(m[stidx+3:stendidx], '')
            # The release year, when in its own column.
            year = _findBetween(m, 'year_column">', '</span>', maxRes=1)
            if year:
                year = year[0]
                m = m.replace('<span class="year_column">%s</span>' % year,
                              '')
            else:
                year = None
            m = _unHtml(m)
            if not m:
                self._mobile_logger.warn('no title for movieID %s', movieID)
                continue
            movie = build_movie(m, movieID=movieID, status=status,
                                roleID=chids, modFunct=self._defModFunct,
                                accessSystem=self.accessSystem,
                                _parsingCharacter=_parseChr, year=year)
            # Strip trailing ':' qualifiers from the section name.
            sectName = sectName.split(':')[0]
            r.setdefault(sectName, []).append(movie)
    # If available, take the always correct name from a form.
    itag = _getTagsWith(s, 'NAME="primary"', maxRes=1)
    if not itag:
        itag = _getTagsWith(s, 'name="primary"', maxRes=1)
    if itag:
        vtag = _findBetween(itag[0], 'VALUE="', ('"', '>'), maxRes=1)
        if not vtag:
            vtag = _findBetween(itag[0], 'value="', ('"', '>'), maxRes=1)
        if vtag:
            try:
                # NOTE(review): assumes the form value is latin-1
                # encoded and url-quoted — confirm against the site.
                vtag = unquote(str(vtag[0]))
                vtag = unicode(vtag, 'latin_1')
                r.update(analyze_name(vtag))
            except UnicodeEncodeError:
                pass
    return {'data': r, 'info sets': ('main', 'filmography')}
def get_person_biography(self, personID):
    """Fetch and parse the "bio" page of the given person; return
    a {'data': ...} dict with spouse, nick names, birth/death
    info, mini biography and the other miscellaneous bio sections."""
    cont = self._mretrieve(imdbURL_person_main % personID + 'bio')
    d = {}
    # Spouse table: one row per marriage.
    spouses = _findBetween(cont, 'Spouse</h5>', ('</table>', '</dd>'),
                           maxRes=1)
    if spouses:
        sl = []
        for spouse in spouses[0].split('</tr>'):
            if spouse.count('</td>') > 1:
                # Separate the name cell from the notes cell.
                spouse = spouse.replace('</td>', '::</td>', 1)
            spouse = _unHtml(spouse)
            spouse = spouse.replace(':: ', '::').strip()
            if spouse: sl.append(spouse)
        if sl: d['spouse'] = sl
    # Nicknames, one per <br/>; ' (' introduces their notes.
    nnames = _findBetween(cont, '<h5>Nickname</h5>', ('<br/> <br/>','<h5>'),
                          maxRes=1)
    if nnames:
        nnames = nnames[0]
        if nnames:
            nnames = [x.strip().replace(' (', '::(', 1)
                      for x in nnames.split('<br/>')]
            if nnames:
                d['nick names'] = nnames
    # Every other "<h5>Section</h5>data<br/>" chunk.
    misc_sects = _findBetween(cont, '<h5>', '<br/>')
    misc_sects[:] = [x.split('</h5>') for x in misc_sects]
    misc_sects[:] = [x for x in misc_sects if len(x) == 2]
    for sect, data in misc_sects:
        sect = sect.lower().replace(':', '').strip()
        # Only 'mini biography' may appear more than once.
        if d.has_key(sect) and sect != 'mini biography': continue
        elif sect in ('spouse', 'nickname'): continue
        # Normalize a few section names to the keys used elsewhere.
        if sect == 'salary': sect = 'salary history'
        elif sect == 'where are they now': sect = 'where now'
        elif sect == 'personal quotes': sect = 'quotes'
        # '::' is the internal item separator; '@@@@' temporarily
        # protects separators that must survive _unHtml.
        data = data.replace('</p><p>', '::')
        data = data.replace('<br><br>', ' ')  # for multi-paragraphs 'bio'
        data = data.replace('</td> <td valign="top">', '@@@@')
        data = data.replace('</td> </tr>', '::')
        data = _unHtml(data)
        data = [x.strip() for x in data.split('::')]
        data[:] = [x.replace('@@@@', '::') for x in data if x]
        if sect == 'height' and data: data = data[0]
        elif sect == 'birth name': data = canonicalName(data[0])
        elif sect == 'date of birth':
            date, notes = date_and_notes(data[0])
            if date:
                d['birth date'] = date
            if notes:
                d['birth notes'] = notes
            continue
        elif sect == 'date of death':
            date, notes = date_and_notes(data[0])
            if date:
                d['death date'] = date
            if notes:
                d['death notes'] = notes
            continue
        elif sect == 'mini biography':
            # Prefix each biography with its author ("author::text").
            ndata = []
            for bio in data:
                byidx = bio.rfind('IMDb Mini Biography By')
                if byidx != -1:
                    bioAuth = bio[:byidx].rstrip()
                else:
                    bioAuth = 'Anonymous'
                bio = u'%s::%s' % (bioAuth, bio[byidx+23:].lstrip())
                ndata.append(bio)
            data[:] = ndata
            if 'mini biography' in d:
                # NOTE(review): only the first new biography is
                # appended when the key already exists — confirm
                # this is intended.
                d['mini biography'].append(ndata[0])
                continue
        d[sect] = data
    return {'data': d}
def _search_character(self, name, results):
    """Search for a character name; return a list of
    (characterID, {'name': ...}) tuples.

    When IMDb resolves the query to a single character it sends the
    character page itself (a "direct hit"): the one result is then
    built from the page <title> and the rel="canonical" link.
    Otherwise the "Popular Characters"/"Characters" result tables
    are parsed.
    """
    cont = subXMLRefs(self._get_search_content('char', name, results))
    name = _findBetween(cont, '<title>', '</title>', maxRes=1)
    res = []
    if not name:
        self._mobile_logger.error('no title tag searching character %s',
                                  name)
        return res
    nl = name[0].lower()
    # A results-list page titles itself "IMDb Search..." or
    # "IMDb Character..."; anything else is a direct hit.
    # FIX: the original condition tested 'imdb search' twice; the
    # duplicate is removed and the checks folded into one startswith.
    if not nl.startswith(('imdb search', 'imdb character')):
        # a direct hit!
        name = _unHtml(name[0]).replace('(Character)', '').strip()
        pid = None
        pidtag = _getTagsWith(cont, 'rel="canonical"', maxRes=1)
        if pidtag:
            pid = _findBetween(pidtag[0], '/character/ch', '/', maxRes=1)
        if not (pid and name):
            self._mobile_logger.error('no direct hit name/characterID for' \
                                      ' character %s', name)
            return res
        res[:] = [(str(pid[0]), analyze_name(name))]
    else:
        # Collect the result tables; maxRes is generous because a
        # table may contain unrelated markup, too.
        sects = _findBetween(cont, '<b>Popular Characters</b>', '</table>',
                             maxRes=results*3)
        sects += _findBetween(cont, '<b>Characters', '</table>',
                              maxRes=results*3)
        for sect in sects:
            lis = _findBetween(sect, '<a href="/character/',
                               ['<small', '</td>', '<br'])
            for li in lis:
                li = '<%s' % li
                pid = re_imdbID.findall(li)
                pname = _unHtml(li)
                if not (pid and pname):
                    self._mobile_logger.debug('no name/characterID' \
                                              ' parsing %s searching for' \
                                              ' character %s', li, name)
                    continue
                res.append((str(pid[0]), analyze_name(pname)))
    return res
def get_character_main(self, characterID):
    """Return the main information for a character, delegating to
    the person parser switched into character mode."""
    return self.get_person_main(characterID, _parseChr=True)
def get_character_biography(self, characterID):
    """Fetch and parse the "bio" page of the given character;
    return a {'data': ...} dict with the optional 'introduction'
    string and 'biography' list."""
    cont = self._mretrieve(imdbURL_character_main % characterID + 'bio')
    d = {}
    # Short introductory blurb, before the first <span>/<h4>.
    intro = _findBetween(cont, '<div class="display">',
                         ('<span>', '<h4>'), maxRes=1)
    if intro:
        intro = _unHtml(intro[0]).strip()
        if intro:
            d['introduction'] = intro
    # Full biography entries, one per <h4> header.
    bios = _findBetween(cont, '<div class="display">',
                        '<div class="history">')
    if bios:
        bios = _findBetween(bios[0], '<h4>', ('<h4>', '</div>'))
    if bios:
        for bio in bios:
            # Keep the header as a "header: text" prefix and turn
            # HTML line breaks into real newlines.
            bio = bio.replace('</h4>', '::')
            bio = bio.replace('\n', ' ')
            bio = bio.replace('<br>', '\n')
            bio = bio.replace('<br/>', '\n')
            bio = subSGMLRefs(re_unhtmlsub('', bio).strip())
            bio = bio.replace(' ::', '::').replace(':: ', '::')
            bio = bio.replace('::', ': ', 1)
            if bio:
                d.setdefault('biography', []).append(bio)
    return {'data': d}
| Python |
"""
parser package (imdb package).
This package provides various parsers to access IMDb data (e.g.: a
parser for the web/http interface, a parser for the SQL database
interface, etc.).
So far, the http/httpThin, mobile and sql parsers are implemented.
Copyright 2004-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__all__ = ['http', 'mobile', 'sql']
| Python |
"""
_logging module (imdb package).
This module provides the logging facilities used by the imdb package.
Copyright 2009-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
# Map user-friendly level names to the stdlib logging constants.
LEVELS = {'debug': logging.DEBUG,
          'info': logging.INFO,
          'warn': logging.WARNING,
          'warning': logging.WARNING,
          'error': logging.ERROR,
          'critical': logging.CRITICAL}

# Package-wide logger: messages go to stderr, tagged with timestamp,
# level and the exact source location they were emitted from.
imdbpyLogger = logging.getLogger('imdbpy')
imdbpyStreamHandler = logging.StreamHandler()
imdbpyFormatter = logging.Formatter('%(asctime)s %(levelname)s [%(name)s] %(pathname)s:%(lineno)d: %(message)s')
imdbpyStreamHandler.setFormatter(imdbpyFormatter)
imdbpyLogger.addHandler(imdbpyStreamHandler)


def setLevel(level):
    """Set the threshold of the main "imdbpy" logger.

    The level is a case-insensitive name from LEVELS; unknown names
    reset the threshold to NOTSET."""
    threshold = LEVELS.get(level.lower().strip(), logging.NOTSET)
    imdbpyLogger.setLevel(threshold)
    imdbpyLogger.log(threshold, 'set logging threshold to "%s"',
                     logging.getLevelName(threshold))
#imdbpyLogger.setLevel(logging.DEBUG)
# It can be an idea to have a single function to log and warn:
#import warnings
#def log_and_warn(msg, args=None, logger=None, level=None):
# """Log the message and issue a warning."""
# if logger is None:
# logger = imdbpyLogger
# if level is None:
# level = logging.WARNING
# if args is None:
# args = ()
# #warnings.warn(msg % args, stacklevel=0)
# logger.log(level, msg % args)
| Python |
"""
_exceptions module (imdb package).
This module provides the exception hierarchy used by the imdb package.
Copyright 2004-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
class IMDbError(Exception):
    """Root of the exception hierarchy of the imdb package.

    Instantiating (i.e.: raising) any subclass also emits a critical
    record on the 'imdbpy' logger, with the active traceback attached."""
    # Shared logger; subclasses inherit it.
    _logger = logging.getLogger('imdbpy')

    def __init__(self, *args, **kwds):
        """Log the exception at critical level, then build it normally."""
        self._logger.critical('%s exception raised; args: %s; kwds: %s',
                              self.__class__.__name__, args, kwds,
                              exc_info=True)
        super(IMDbError, self).__init__(*args, **kwds)
class IMDbDataAccessError(IMDbError):
    """Raised when the needed data cannot be accessed/retrieved."""
class IMDbParserError(IMDbError):
    """Raised when an error occurs while parsing the data."""
| Python |
"""
utils module (imdb package).
This module provides basic utilities for the imdb package.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from __future__ import generators
import re
import string
import logging
from copy import copy, deepcopy
from time import strptime, strftime

from imdb import VERSION
from imdb import articles
from imdb._exceptions import IMDbParserError

# Logger for the imdb.utils module.
_utils_logger = logging.getLogger('imdbpy.utils')

# The regular expression for the "long" year format of IMDb, like
# "(1998)" and "(1986/II)", where the optional roman number (that I
# call "imdbIndex") after the slash is used for movies with the same
# title and year of release.
# XXX: probably L, C, D and M are far too much! ;-)
re_year_index = re.compile(r'\(([0-9\?]{4}(/[IVXLCDM]+)?)\)')
# Match only the imdbIndex (for name strings), e.g. "(II)".
re_index = re.compile(r'^\(([IVXLCDM]+)\)$')
# Match the number of episodes, e.g. " (26 episodes)".
# NOTE(review): the pattern is a non-raw string; '\s' and '\d' survive
# only because they are not recognized string escapes.
re_episodes = re.compile('\s?\((\d+) episodes\)', re.I)
# Match the extra episode information inside {...}: optional episode
# title, optional "(YYYY-MM-DD)" air date, optional "(#season.episode)".
re_episode_info = re.compile(r'{\s*(.+?)?\s?(\([0-9\?]{4}-[0-9\?]{1,2}-[0-9\?]{1,2}\))?\s?(\(#[0-9]+\.[0-9]+\))?}')

# Common particles found inside surnames (e.g. "von", "de la"),
# used by canonicalName to locate where a surname begins.
_sname_suffixes = ('de', 'la', 'der', 'den', 'del', 'y', 'da', 'van',
                   'e', 'von', 'the', 'di', 'du', 'el', 'al')
def canonicalName(name):
    """Return the given name in canonical "Surname, Name" format.
    It assumes that name is in the 'Name Surname' format."""
    # XXX: some statistics (as of 17 Apr 2008, over 2288622 names):
    #      - just a surname: 69476
    #      - single surname, single name: 2209656
    #      - composed surname, composed name: 9490
    #      - composed surname, single name: 67606
    #        (2: 59764, 3: 6862, 4: 728)
    #      - single surname, composed name: 242310
    #        (2: 229467, 3: 9901, 4: 2041, 5: 630)
    #      - Jr.: 8025
    # Don't convert names already in the canonical format.
    if name.find(', ') != -1: return name
    # Pick str or unicode templates matching the input type.
    if isinstance(name, unicode):
        joiner = u'%s, %s'
        sur_joiner = u'%s %s'
        sur_space = u' %s'
        space = u' '
    else:
        joiner = '%s, %s'
        sur_joiner = '%s %s'
        sur_space = ' %s'
        space = ' '
    sname = name.split(' ')
    snl = len(sname)
    if snl == 2:
        # Just a name and a surname: how boring...
        name = joiner % (sname[1], sname[0])
    elif snl > 2:
        lsname = [x.lower() for x in sname]
        # Positions where a surname particle ("von", "de", ...) may
        # start: the very beginning, or just before the last word(s).
        if snl == 3: _indexes = (0, snl-2)
        else: _indexes = (0, snl-2, snl-3)
        # Check for common surname prefixes at the beginning and near the end.
        for index in _indexes:
            if lsname[index] not in _sname_suffixes: continue
            try:
                # Build the surname.
                surn = sur_joiner % (sname[index], sname[index+1])
                del sname[index]
                del sname[index]
                try:
                    # Handle the "Jr." after the name.
                    if lsname[index+2].startswith('jr'):
                        surn += sur_space % sname[index]
                        del sname[index]
                except (IndexError, ValueError):
                    pass
                name = joiner % (surn, space.join(sname))
                break
            except ValueError:
                continue
        else:
            # No particle found: the last word is the whole surname.
            name = joiner % (sname[-1], space.join(sname[:-1]))
    return name
def normalizeName(name):
    """Return a name in the normal "Name Surname" format."""
    # Keep the template type (str/unicode) aligned with the input.
    if isinstance(name, unicode):
        fmt = u'%s %s'
    else:
        fmt = '%s %s'
    parts = name.split(', ')
    if len(parts) == 2:
        surname, first = parts
        name = fmt % (first, surname)
    return name
def analyze_name(name, canonical=None):
    """Return a dictionary with the name and the optional imdbIndex
    keys, from the given string.

    If canonical is None (default), the name is stored in its own style.
    If canonical is True, the name is converted to canonical style.
    If canonical is False, the name is converted to normal format.

    raise an IMDbParserError exception if the name is not valid.
    """
    original_n = name
    name = name.strip()
    res = {}
    imdbIndex = ''
    opi = name.rfind('(')
    if opi != -1:
        cpi = name.rfind(')')
        if cpi > opi and re_index.match(name[opi:cpi+1]):
            # A roman-numeral imdbIndex like "(II)": strip and keep it.
            imdbIndex = name[opi+1:cpi]
            name = name[:opi].rstrip()
        else:
            # XXX: for the birth and death dates case like " (1926-2004)"
            name = name[:opi-1]
    if not name:
        # FIX: use the exception-instance form instead of the Python 2
        # only "raise Exc, msg" statement; equivalent on every version.
        raise IMDbParserError('invalid name: "%s"' % original_n)
    if canonical is not None:
        if canonical:
            name = canonicalName(name)
        else:
            name = normalizeName(name)
    res['name'] = name
    if imdbIndex:
        res['imdbIndex'] = imdbIndex
    return res
def build_name(name_dict, canonical=None):
    """Given a dictionary that represents a "long" IMDb name,
    return a string.

    If canonical is None (default), the name is returned in the stored style.
    If canonical is True, the name is converted to canonical style.
    If canonical is False, the name is converted to normal format.
    """
    # Prefer an explicit 'canonical name'; fall back on 'name'.
    name = name_dict.get('canonical name') or name_dict.get('name', '')
    if not name:
        return ''
    if canonical is not None:
        if canonical:
            name = canonicalName(name)
        else:
            name = normalizeName(name)
    imdbIndex = name_dict.get('imdbIndex')
    if imdbIndex:
        # Append the roman-numeral disambiguation index.
        name = name + ' (%s)' % imdbIndex
    return name
# XXX: here only for backward compatibility. Find and remove any dependency.
# These module-level aliases re-export the generic (language-less)
# article tables from imdb.articles.
_articles = articles.GENERIC_ARTICLES
_unicodeArticles = articles.toUnicode(_articles)
articlesDicts = articles.articlesDictsForLang(None)
spArticles = articles.spArticlesForLang(None)
def canonicalTitle(title, lang=None):
    """Return the title in the canonic format 'Movie Title, The';
    beware that it doesn't handle long imdb titles, but only the
    title portion, without year[/imdbIndex] or special markup.
    The 'lang' argument can be used to specify the language of the title.
    """
    isUnicode = isinstance(title, unicode)
    # Tables are indexed by unicode-ness (0: str, 1: unicode).
    articlesDicts = articles.articlesDictsForLang(lang)
    try:
        # Already canonical: the title ends with ", The"-style article.
        if title.split(', ')[-1].lower() in articlesDicts[isUnicode]:
            return title
    except IndexError:
        pass
    if isUnicode:
        _format = u'%s, %s'
    else:
        _format = '%s, %s'
    ltitle = title.lower()
    spArticles = articles.spArticlesForLang(lang)
    # Move a leading article (with its trailing space/apostrophe)
    # to the end of the title.
    for article in spArticles[isUnicode]:
        if ltitle.startswith(article):
            lart = len(article)
            title = _format % (title[lart:], title[:lart])
            # Drop the trailing space kept from "The "-like articles.
            if article[-1] == ' ':
                title = title[:-1]
            break
    ## XXX: an attempt using a dictionary lookup.
    ##for artSeparator in (' ', "'", '-'):
    ##    article = _articlesDict.get(ltitle.split(artSeparator)[0])
    ##    if article is not None:
    ##        lart = len(article)
    ##        # check titles like "una", "I'm Mad" and "L'abbacchio".
    ##        if title[lart:] == '' or (artSeparator != ' ' and
    ##                                title[lart:][1] != artSeparator): continue
    ##        title = '%s, %s' % (title[lart:], title[:lart])
    ##        if artSeparator == ' ': title = title[1:]
    ##        break
    return title
def normalizeTitle(title, lang=None):
    """Return the title in the normal "The Title" format;
    beware that it doesn't handle long imdb titles, but only the
    title portion, without year[/imdbIndex] or special markup.
    The 'lang' argument can be used to specify the language of the title.
    """
    is_uni = isinstance(title, unicode)
    chunks = title.split(', ')
    lang_articles = articles.articlesDictsForLang(lang)
    if len(chunks) <= 1 or chunks[-1].lower() not in lang_articles[is_uni]:
        # No trailing article: nothing to move.
        return title
    article = chunks[-1]
    # Articles ending with an apostrophe or a dash glue directly to
    # the rest of the title (e.g. "L'avventura").
    sep = ' '
    if article[-1] in ("'", '-'):
        sep = ''
    if is_uni:
        return u'%s%s%s' % (article, sep, u', '.join(chunks[:-1]))
    return '%s%s%s' % (article, sep, ', '.join(chunks[:-1]))
def _split_series_episode(title):
"""Return the series and the episode titles; if this is not a
series' episode, the returned series title is empty.
This function recognize two different styles:
"The Series" An Episode (2005)
"The Series" (2004) {An Episode (2005) (#season.episode)}"""
series_title = ''
episode_or_year = ''
if title[-1:] == '}':
# Title of the episode, as in the plain text data files.
begin_eps = title.rfind('{')
if begin_eps == -1: return '', ''
series_title = title[:begin_eps].rstrip()
# episode_or_year is returned with the {...}
episode_or_year = title[begin_eps:].strip()
if episode_or_year[:12] == '{SUSPENDED}}': return '', ''
# XXX: works only with tv series; it's still unclear whether
# IMDb will support episodes for tv mini series and tv movies...
elif title[0:1] == '"':
second_quot = title[1:].find('"') + 2
if second_quot != 1: # a second " was found.
episode_or_year = title[second_quot:].lstrip()
first_char = episode_or_year[0:1]
if not first_char: return '', ''
if first_char != '(':
# There is not a (year) but the title of the episode;
# that means this is an episode title, as returned by
# the web server.
series_title = title[:second_quot]
##elif episode_or_year[-1:] == '}':
## # Title of the episode, as in the plain text data files.
## begin_eps = episode_or_year.find('{')
## if begin_eps == -1: return series_title, episode_or_year
## series_title = title[:second_quot+begin_eps].rstrip()
## # episode_or_year is returned with the {...}
## episode_or_year = episode_or_year[begin_eps:]
return series_title, episode_or_year
def is_series_episode(title):
    """Return True if 'title' is a series episode."""
    # FIX: return a real bool, as the docstring promises, instead of
    # the old 1/0 ints (backward-compatible: True == 1, False == 0).
    return bool(_split_series_episode(title.strip())[0])
def analyze_title(title, canonical=None, canonicalSeries=None,
                  canonicalEpisode=None, _emptyString=u''):
    """Analyze the given title and return a dictionary with the
    "stripped" title, the kind of the show ("movie", "tv series", etc.),
    the year of production and the optional imdbIndex (a roman number
    used to distinguish between movies with the same title and year).

    If canonical is None (default), the title is stored in its own style.
    If canonical is True, the title is converted to canonical style.
    If canonical is False, the title is converted to normal format.

    raise an IMDbParserError exception if the title is not valid.
    """
    # XXX: introduce the 'lang' argument?
    if canonical is not None:
        canonicalSeries = canonicalEpisode = canonical
    original_t = title
    result = {}
    title = title.strip()
    year = _emptyString
    kind = _emptyString
    imdbIndex = _emptyString
    series_title, episode_or_year = _split_series_episode(title)
    if series_title:
        # It's an episode of a series.
        series_d = analyze_title(series_title, canonical=canonicalSeries)
        oad = sen = ep_year = _emptyString
        # Plain text data files format.
        if episode_or_year[0:1] == '{' and episode_or_year[-1:] == '}':
            match = re_episode_info.findall(episode_or_year)
            if match:
                # Episode title, original air date and #season.episode
                episode_or_year, oad, sen = match[0]
                episode_or_year = episode_or_year.strip()
                if not oad:
                    # No year, but the title is something like (2005-04-12)
                    if episode_or_year and episode_or_year[0] == '(' and \
                            episode_or_year[-1:] == ')' and \
                            episode_or_year[1:2] != '#':
                        oad = episode_or_year
                        if oad[1:5] and oad[5:6] == '-':
                            try:
                                ep_year = int(oad[1:5])
                            except (TypeError, ValueError):
                                pass
                if not oad and not sen and episode_or_year.startswith('(#'):
                    sen = episode_or_year
        elif episode_or_year.startswith('Episode dated'):
            # Untitled episodes are named after their air date.
            oad = episode_or_year[14:]
            if oad[-4:].isdigit():
                try:
                    ep_year = int(oad[-4:])
                except (TypeError, ValueError):
                    pass
        episode_d = analyze_title(episode_or_year, canonical=canonicalEpisode)
        episode_d['kind'] = u'episode'
        episode_d['episode of'] = series_d
        if oad:
            # Strip the surrounding parentheses from "(YYYY-MM-DD)".
            episode_d['original air date'] = oad[1:-1]
            if ep_year and episode_d.get('year') is None:
                episode_d['year'] = ep_year
        # "(#season.episode)" markup.
        if sen and sen[2:-1].find('.') != -1:
            seas, epn = sen[2:-1].split('.')
            if seas:
                # Set season and episode.
                try: seas = int(seas)
                except: pass
                try: epn = int(epn)
                except: pass
                episode_d['season'] = seas
                if epn:
                    episode_d['episode'] = epn
        return episode_d
    # First of all, search for the kind of show.
    # XXX: Number of entries at 17 Apr 2008:
    #      movie:          379,871
    #      episode:        483,832
    #      tv movie:        61,119
    #      tv series:       44,795
    #      video movie:     57,915
    #      tv mini series:   5,497
    #      video game:       5,490
    # More up-to-date statistics: http://us.imdb.com/database_statistics
    if title.endswith('(TV)'):
        kind = u'tv movie'
        title = title[:-4].rstrip()
    elif title.endswith('(V)'):
        kind = u'video movie'
        title = title[:-3].rstrip()
    elif title.endswith('(video)'):
        kind = u'video movie'
        title = title[:-7].rstrip()
    elif title.endswith('(mini)'):
        kind = u'tv mini series'
        title = title[:-6].rstrip()
    elif title.endswith('(VG)'):
        kind = u'video game'
        title = title[:-4].rstrip()
    # Search for the year and the optional imdbIndex (a roman number).
    yi = re_year_index.findall(title)
    if yi:
        last_yi = yi[-1]
        year = last_yi[0]
        if last_yi[1]:
            # "(1986/II)": separate year and imdbIndex.
            imdbIndex = last_yi[1][1:]
            year = year[:-len(imdbIndex)-1]
        i = title.rfind('(%s)' % last_yi[0])
        if i != -1:
            title = title[:i-1].rstrip()
    # This is a tv (mini) series: strip the '"' at the begin and at the end.
    # XXX: strip('"') is not used for compatibility with Python 2.0.
    if title and title[0] == title[-1] == '"':
        if not kind:
            kind = u'tv series'
        title = title[1:-1].strip()
    elif title.endswith('(TV series)'):
        kind = u'tv series'
        title = title[:-11].rstrip()
    if not title:
        raise IMDbParserError, 'invalid title: "%s"' % original_t
    if canonical is not None:
        if canonical:
            title = canonicalTitle(title)
        else:
            title = normalizeTitle(title)
    # 'kind' is one in ('movie', 'episode', 'tv series', 'tv mini series',
    # 'tv movie', 'video movie', 'video game')
    result['title'] = title
    result['kind'] = kind or u'movie'
    if year and year != '????':
        try:
            result['year'] = int(year)
        except (TypeError, ValueError):
            pass
    if imdbIndex:
        result['imdbIndex'] = imdbIndex
    # Callers passing a str _emptyString want plain str values back.
    if isinstance(_emptyString, str):
        result['kind'] = str(kind or 'movie')
    return result
_web_format = '%d %B %Y'
_ptdf_format = '(%Y-%m-%d)'
def _convertTime(title, fromPTDFtoWEB=1, _emptyString=u''):
"""Convert a time expressed in the pain text data files, to
the 'Episode dated ...' format used on the web site; if
fromPTDFtoWEB is false, the inverted conversion is applied."""
try:
if fromPTDFtoWEB:
from_format = _ptdf_format
to_format = _web_format
else:
from_format = u'Episode dated %s' % _web_format
to_format = _ptdf_format
t = strptime(title, from_format)
title = strftime(to_format, t)
if fromPTDFtoWEB:
if title[0] == '0': title = title[1:]
title = u'Episode dated %s' % title
except ValueError:
pass
if isinstance(_emptyString, str):
try:
title = str(title)
except UnicodeDecodeError:
pass
return title
def build_title(title_dict, canonical=None, canonicalSeries=None,
                canonicalEpisode=None, ptdf=0, lang=None, _doYear=1,
                _emptyString=u''):
    """Given a dictionary that represents a "long" IMDb title,
    return a string.

    If canonical is None (default), the title is returned in the stored style.
    If canonical is True, the title is converted to canonical style.
    If canonical is False, the title is converted to normal format.

    lang can be used to specify the language of the title.
    If ptdf is true, the plain text data files format is used.
    """
    if canonical is not None:
        canonicalSeries = canonical
    pre_title = _emptyString
    kind = title_dict.get('kind')
    episode_of = title_dict.get('episode of')
    if kind == 'episode' and episode_of is not None:
        # Episodes: build the series title first, then the episode part.
        # Works with both Movie instances and plain dictionaries.
        doYear = 0
        if ptdf:
            doYear = 1
        pre_title = build_title(episode_of, canonical=canonicalSeries,
                                ptdf=0, _doYear=doYear,
                                _emptyString=_emptyString)
        ep_dict = {'title': title_dict.get('title', ''),
                   'imdbIndex': title_dict.get('imdbIndex')}
        ep_title = ep_dict['title']
        if not ptdf:
            # Web style: year in the episode, dates spelled out.
            doYear = 1
            ep_dict['year'] = title_dict.get('year', '????')
            if ep_title[0:1] == '(' and ep_title[-1:] == ')' and \
                    ep_title[1:5].isdigit():
                ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=1,
                                                _emptyString=_emptyString)
        else:
            # Plain text data files style: "(YYYY-MM-DD)" episode titles.
            doYear = 0
            if ep_title.startswith('Episode dated'):
                ep_dict['title'] = _convertTime(ep_title, fromPTDFtoWEB=0,
                                                _emptyString=_emptyString)
        episode_title = build_title(ep_dict,
                                    canonical=canonicalEpisode, ptdf=ptdf,
                                    _doYear=doYear, _emptyString=_emptyString)
        if ptdf:
            # Append "(YYYY-MM-DD)" and "(#season.episode)" markup and
            # wrap the whole episode part in braces.
            oad = title_dict.get('original air date', _emptyString)
            if len(oad) == 10 and oad[4] == '-' and oad[7] == '-' and \
                    episode_title.find(oad) == -1:
                episode_title += ' (%s)' % oad
            seas = title_dict.get('season')
            if seas is not None:
                episode_title += ' (#%s' % seas
                episode = title_dict.get('episode')
                if episode is not None:
                    episode_title += '.%s' % episode
                episode_title += ')'
            episode_title = '{%s}' % episode_title
        return '%s %s' % (pre_title, episode_title)
    title = title_dict.get('title', '')
    if not title: return _emptyString
    if canonical is not None:
        if canonical:
            title = canonicalTitle(title, lang=lang)
        else:
            title = normalizeTitle(title, lang=lang)
    if pre_title:
        title = '%s %s' % (pre_title, title)
    # Series titles are quoted.
    if kind in (u'tv series', u'tv mini series'):
        title = '"%s"' % title
    if _doYear:
        # Append "(year[/imdbIndex])".
        imdbIndex = title_dict.get('imdbIndex')
        year = title_dict.get('year') or u'????'
        if isinstance(_emptyString, str):
            year = str(year)
        title += ' (%s' % year
        if imdbIndex:
            title += '/%s' % imdbIndex
        title += ')'
    # Trailing kind markers, as used in the plain text data files.
    if kind:
        if kind == 'tv movie':
            title += ' (TV)'
        elif kind == 'video movie':
            title += ' (V)'
        elif kind == 'tv mini series':
            title += ' (mini)'
        elif kind == 'video game':
            title += ' (VG)'
    return title
def split_company_name_notes(name):
    """Return two strings, the first representing the company name,
    and the other representing the (optional) notes."""
    name = name.strip()
    notes = u''
    # Notes, when present, are a trailing "(...)" chunk.
    if name.endswith(')'):
        first_paren = name.find('(')
        if first_paren != -1:
            notes = name[first_paren:]
            name = name[:first_paren].rstrip()
    return name, notes
def analyze_company_name(name, stripNotes=False):
    """Return a dictionary with the name and the optional 'country'
    keys, from the given string.

    If stripNotes is true, tries to not consider optional notes.

    raise an IMDbParserError exception if the name is not valid.
    """
    if stripNotes:
        name = split_company_name_notes(name)[0]
    o_name = name
    name = name.strip()
    country = None
    # A trailing "[xx]" chunk is the country code.
    if name.endswith(']'):
        idx = name.rfind('[')
        if idx != -1:
            country = name[idx:]
            name = name[:idx].rstrip()
    if not name:
        # FIX: use the exception-instance form instead of the Python 2
        # only "raise Exc, msg" statement; equivalent on every version.
        raise IMDbParserError('invalid name: "%s"' % o_name)
    result = {'name': name}
    if country:
        result['country'] = country
    return result
def build_company_name(name_dict, _emptyString=u''):
    """Given a dictionary that represents a "long" IMDb company name,
    return a string."""
    name = name_dict.get('name')
    if not name:
        return _emptyString
    country = name_dict.get('country')
    if country is None:
        return name
    # Re-attach the "[xx]" country code.
    return name + ' %s' % country
class _LastC:
"""Size matters."""
def __cmp__(self, other):
if isinstance(other, self.__class__): return 0
return 1
_last = _LastC()
def cmpMovies(m1, m2):
    """Compare two movies by year, in reverse order; the imdbIndex is checked
    for movies with the same year of production and title.

    Returns -1/0/1 in the old __cmp__ style; newest movies sort first."""
    # Sort tv series' episodes: first by series, then season/episode.
    m1e = m1.get('episode of')
    m2e = m2.get('episode of')
    if m1e is not None and m2e is not None:
        cmp_series = cmpMovies(m1e, m2e)
        if cmp_series != 0:
            return cmp_series
        m1s = m1.get('season')
        m2s = m2.get('season')
        if m1s is not None and m2s is not None:
            # Lower season/episode numbers sort first (reverse of
            # the year ordering below).
            if m1s < m2s:
                return 1
            elif m1s > m2s:
                return -1
            m1p = m1.get('episode')
            m2p = m2.get('episode')
            if m1p < m2p:
                return 1
            elif m1p > m2p:
                return -1
    # For episodes, the production year of the series is used.
    try:
        if m1e is None: m1y = int(m1.get('year', 0))
        else: m1y = int(m1e.get('year', 0))
    except ValueError:
        m1y = 0
    try:
        if m2e is None: m2y = int(m2.get('year', 0))
        else: m2y = int(m2e.get('year', 0))
    except ValueError:
        m2y = 0
    if m1y > m2y: return -1
    if m1y < m2y: return 1
    # Ok, these movies have the same production year...
    #m1t = m1.get('canonical title', _last)
    #m2t = m2.get('canonical title', _last)
    # It should works also with normal dictionaries (returned from searches).
    #if m1t is _last and m2t is _last:
    m1t = m1.get('title', _last)
    m2t = m2.get('title', _last)
    if m1t < m2t: return -1
    if m1t > m2t: return 1
    # Ok, these movies have the same title...
    m1i = m1.get('imdbIndex', _last)
    m2i = m2.get('imdbIndex', _last)
    if m1i > m2i: return -1
    if m1i < m2i: return 1
    # Last resort: the movieID attribute (Movie instances only).
    m1id = getattr(m1, 'movieID', None)
    # Introduce this check even for other comparisons functions?
    # XXX: is it safe to check without knowning the data access system?
    #      probably not a great idea. Check for 'kind', instead?
    if m1id is not None:
        m2id = getattr(m2, 'movieID', None)
        if m1id > m2id: return -1
        elif m1id < m2id: return 1
    return 0
def cmpPeople(p1, p2):
    """Compare two people by billingPos, name and imdbIndex;
    returns -1/0/1 in the old __cmp__ style."""
    # Missing/falsy billing positions sort last.
    bp1 = getattr(p1, 'billingPos', None) or _last
    bp2 = getattr(p2, 'billingPos', None) or _last
    if bp1 > bp2:
        return 1
    if bp1 < bp2:
        return -1
    # Prefer canonical names; fall back on plain names only when
    # neither person has one.
    n1 = p1.get('canonical name', _last)
    n2 = p2.get('canonical name', _last)
    if n1 is _last and n2 is _last:
        n1 = p1.get('name', _last)
        n2 = p2.get('name', _last)
    if n1 > n2:
        return 1
    if n1 < n2:
        return -1
    i1 = p1.get('imdbIndex', _last)
    i2 = p2.get('imdbIndex', _last)
    if i1 > i2:
        return 1
    if i1 < i2:
        return -1
    return 0
def cmpCompanies(p1, p2):
    """Compare two companies by name and country.

    Returns -1, 0 or 1, suitable for use as a cmp-style function.
    Missing values sort after every real value (via the _last sentinel).
    """
    # Prefer the 'long imdb name'; fall back to the plain name only
    # when neither company has one.
    name1 = p1.get('long imdb name', _last)
    name2 = p2.get('long imdb name', _last)
    if name1 is _last and name2 is _last:
        name1 = p1.get('name', _last)
        name2 = p2.get('name', _last)
    if name1 > name2:
        return 1
    if name1 < name2:
        return -1
    # Same name: disambiguate by country.
    country1 = p1.get('country', _last)
    country2 = p2.get('country', _last)
    if country1 > country2:
        return 1
    if country1 < country2:
        return -1
    return 0
# References to titles, names and characters, as they appear in plain
# text data files: titles as '_A Title (2005)_ (qv)', person names as
# "'A Name' (qv)" and character names as '#A Character# (qv)'; the
# capture group holds the bare title/name.
# XXX: find better regexp!
re_titleRef = re.compile(r'_(.+?(?: \([0-9\?]{4}(?:/[IVXLCDM]+)?\))?(?: \(mini\)| \(TV\)| \(V\)| \(VG\))?)_ \(qv\)')
# FIXME: doesn't match persons with ' in the name.
re_nameRef = re.compile(r"'([^']+?)' \(qv\)")
# XXX: good choice? Are there characters with # in the name?
re_characterRef = re.compile(r"#([^']+?)# \(qv\)")
# Functions used to filter the text strings.
def modNull(s, titlesRefs, namesRefs, charactersRefs):
"""Do nothing."""
return s
def modClearTitleRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Remove titles references."""
    # Strip the '_Title_ (qv)' markup, keeping only the title text.
    return re_titleRef.sub(r'\1', s)
def modClearNameRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Remove names references."""
    # Strip the "'Name' (qv)" markup, keeping only the name text.
    return re_nameRef.sub(r'\1', s)
def modClearCharacterRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Remove characters references"""
    # Strip the '#Character# (qv)' markup, keeping only the character text.
    return re_characterRef.sub(r'\1', s)
def modClearRefs(s, titlesRefs, namesRefs, charactersRefs):
    """Remove titles, names and characters references."""
    # Apply the three scrubbers in the usual order (titles, characters,
    # names); the refs dictionaries are ignored by all of them.
    for scrubber in (modClearTitleRefs, modClearCharacterRefs,
                     modClearNameRefs):
        s = scrubber(s, {}, {}, {})
    return s
def modifyStrings(o, modFunct, titlesRefs, namesRefs, charactersRefs):
    """Modify a string (or string values in a dictionary or strings
    in a list), using the provided modFunct function and titlesRefs
    namesRefs and charactersRefs references dictionaries."""
    # Notice that it doesn't go any deeper than the first two levels in a list.
    if isinstance(o, (unicode, str)):
        return modFunct(o, titlesRefs, namesRefs, charactersRefs)
    elif isinstance(o, (list, tuple, dict)):
        # Copy-on-write: the container is (shallow-)copied only the
        # first time a string value actually needs to be replaced, so
        # unmodified containers are returned as-is.
        _stillorig = 1
        if isinstance(o, (list, tuple)): keys = xrange(len(o))
        else: keys = o.keys()
        for i in keys:
            v = o[i]
            if isinstance(v, (unicode, str)):
                if _stillorig:
                    o = copy(o)
                    _stillorig = 0
                o[i] = modFunct(v, titlesRefs, namesRefs, charactersRefs)
            elif isinstance(v, (list, tuple)):
                # Nested lists are modified in place (no copy-on-write
                # at the second level).
                modifyStrings(o[i], modFunct, titlesRefs, namesRefs,
                              charactersRefs)
    # Non-string scalars fall through unchanged.
    return o
def date_and_notes(s):
    """Parse (birth|death) date and notes; returns a tuple in the
    form (date, notes)."""
    s = s.strip()
    if not s:
        return (u'', u'')
    notes = u''
    # A date either starts with a digit or with one of these keywords
    # (month names, 'circa' markers or an unknown-year placeholder).
    dateStarters = ('c.', 'january', 'february', 'march', 'april', 'may',
                    'june', 'july', 'august', 'september', 'october',
                    'november', 'december', 'ca.', 'circa', '????,')
    if s[0].isdigit() or s.split()[0].lower() in dateStarters:
        # Everything after the first comma is considered notes.
        comma = s.find(',')
        if comma == -1:
            # No notes separator: the whole string is the notes.
            notes = s
            s = u''
        else:
            notes = s[comma + 1:].strip()
            s = s[:comma]
    # An unknown date placeholder is normalized to the empty string.
    if s == '????':
        s = u''
    return s, notes
class RolesList(list):
    """A list of Person or Character instances, used for the currentRole
    property."""

    def __unicode__(self):
        # Multiple roles are rendered separated by ' / '.
        return u' / '.join([unicode(x) for x in self])

    def __str__(self):
        # FIXME: does it make sense at all? Return a unicode doesn't
        # seem right, in __str__.
        return u' / '.join([unicode(x).encode('utf8') for x in self])
# Replace '&' with '&amp;', but only if it's not already part of a charref.
# NOTE(review): the negated character class below looks suspicious - as
# written, a '&' followed by an alphanumeric entity body (e.g. '&amp;')
# still matches and would be escaped again; confirm against upstream
# whether '[a-zA-Z0-9_#]' (without the '^') was intended.
#_re_amp = re.compile(r'(&)(?!\w+;)', re.I)
#_re_amp = re.compile(r'(?<=\W)&(?=[^a-zA-Z0-9_#])')
_re_amp = re.compile(r'&(?![^a-zA-Z0-9_#]{1,5};)')
def escape4xml(value):
    """Escape some chars that can't be present in a XML value.

    *value* -- an int, str or unicode; ints are converted to str.
    Returns the escaped string (ascii-encoded, with XML char references
    for non-ascii chars, when the input was unicode).
    """
    if isinstance(value, int):
        value = str(value)
    # The replacement strings below were destroyed by an HTML-entity
    # unescaping pass (they read "'&' -> '&'" and so on, making this
    # function a no-op); restore the XML predefined entities.
    # '&' must be handled first: doing it last would corrupt the
    # entities introduced by the other replacements.
    value = _re_amp.sub('&amp;', value)
    value = value.replace('"', '&quot;').replace("'", '&apos;')
    value = value.replace('<', '&lt;').replace('>', '&gt;')
    if isinstance(value, unicode):
        value = value.encode('ascii', 'xmlcharrefreplace')
    return value
def _refsToReplace(value, modFunct, titlesRefs, namesRefs, charactersRefs):
    """Return three lists - for movie titles, persons and characters names -
    with two items tuples: the first item is the reference once escaped
    by the user-provided modFunct function, the second is the same
    reference un-escaped."""
    mRefs = []
    # One pass per reference kind, rebuilding the original 'X (qv)'
    # markup from the captured title/name via refTemplate.
    for refRe, refTemplate in [(re_titleRef, u'_%s_ (qv)'),
                               (re_nameRef, u"'%s' (qv)"),
                               (re_characterRef, u'#%s# (qv)')]:
        theseRefs = []
        for theRef in refRe.findall(value):
            # refTemplate % theRef values don't change for a single
            # _Container instance, so this is a good candidate for a
            # cache or something - even if it's so rarely used that...
            # Moreover, it can grow - ia.update(...) - and change if
            # modFunct is modified.
            goodValue = modFunct(refTemplate % theRef, titlesRefs, namesRefs,
                                 charactersRefs)
            # Prevents problems with crap in plain text data files.
            # We should probably exclude invalid chars and string that
            # are too long in the re_*Ref expressions.
            if '_' in goodValue or len(goodValue) > 128:
                continue
            toReplace = escape4xml(goodValue)
            # Only the 'value' portion is replaced.
            replaceWith = goodValue.replace(theRef, escape4xml(theRef))
            theseRefs.append((toReplace, replaceWith))
        mRefs.append(theseRefs)
    return mRefs
def _handleTextNotes(s):
"""Split text::notes strings."""
ssplit = s.split('::', 1)
if len(ssplit) == 1:
return s
return u'%s<notes>%s</notes>' % (ssplit[0], ssplit[1])
def _normalizeValue(value, withRefs=False, modFunct=None, titlesRefs=None,
                    namesRefs=None, charactersRefs=None):
    """Replace some chars that can't be present in a XML text."""
    # XXX: use s.encode(encoding, 'xmlcharrefreplace') ? Probably not
    # a great idea: after all, returning a unicode is safe.
    if isinstance(value, (unicode, str)):
        if not withRefs:
            value = _handleTextNotes(escape4xml(value))
        else:
            # Replace references that were accidentally escaped.
            replaceLists = _refsToReplace(value, modFunct, titlesRefs,
                                          namesRefs, charactersRefs)
            value = modFunct(value, titlesRefs or {}, namesRefs or {},
                             charactersRefs or {})
            value = _handleTextNotes(escape4xml(value))
            for replaceList in replaceLists:
                for toReplace, replaceWith in replaceList:
                    value = value.replace(toReplace, replaceWith)
    else:
        # Non-string values (ints, floats, ...) are stringified as-is.
        value = unicode(value)
    return value
def _tag4TON(ton, addAccessSystem=False, _containerOnly=False):
    """Build a tag for the given _Container instance;
    both open and close tags are returned.

    *ton* -- a Movie, Person, Character or Company instance.
    *addAccessSystem* -- if True, add an access-system attribute.
    *_containerOnly* -- if True, omit the inner <title>/<name> element.
    """
    # The tag name is the lowercased class name (movie, person, ...).
    tag = ton.__class__.__name__.lower()
    what = 'name'
    if tag == 'movie':
        value = ton.get('long imdb title') or ton.get('title', '')
        what = 'title'
    else:
        value = ton.get('long imdb name') or ton.get('name', '')
    value = _normalizeValue(value)
    extras = u''
    # Serialize the currentRole (possibly a list of roles) as nested
    # <current-role> elements.
    crl = ton.currentRole
    if crl:
        if not isinstance(crl, list):
            crl = [crl]
        for cr in crl:
            crTag = cr.__class__.__name__.lower()
            crValue = cr['long imdb name']
            crValue = _normalizeValue(crValue)
            crID = cr.getID()
            if crID is not None:
                extras += u'<current-role><%s id="%s">' \
                          u'<name>%s</name></%s>' % (crTag, crID,
                                                     crValue, crTag)
            else:
                extras += u'<current-role><%s><name>%s</name></%s>' % \
                          (crTag, crValue, crTag)
            if cr.notes:
                extras += u'<notes>%s</notes>' % _normalizeValue(cr.notes)
            extras += u'</current-role>'
    theID = ton.getID()
    if theID is not None:
        beginTag = u'<%s id="%s"' % (tag, theID)
        if addAccessSystem and ton.accessSystem:
            beginTag += ' access-system="%s"' % ton.accessSystem
        if not _containerOnly:
            beginTag += u'><%s>%s</%s>' % (what, value, what)
        else:
            beginTag += u'>'
    else:
        if not _containerOnly:
            beginTag = u'<%s><%s>%s</%s>' % (tag, what, value, what)
        else:
            beginTag = u'<%s>' % tag
    beginTag += extras
    if ton.notes:
        beginTag += u'<notes>%s</notes>' % _normalizeValue(ton.notes)
    return (beginTag, u'</%s>' % tag)
# Maps a dotted tag path to a (replacementTagName, keepKeyAsAttribute)
# pair: unpredictable dictionary keys found at that path are serialized
# with the given tag name and, when the second item is True, the
# original key is preserved in a 'key' attribute (see _tagAttr).
TAGS_TO_MODIFY = {
    'movie.parents-guide': ('item', True),
    'movie.number-of-votes': ('item', True),
    'movie.soundtrack.item': ('item', True),
    'movie.quotes': ('quote', False),
    'movie.quotes.quote': ('line', False),
    'movie.demographic': ('item', True),
    'movie.episodes': ('season', True),
    'movie.episodes.season': ('episode', True),
    'person.merchandising-links': ('item', True),
    'person.genres': ('item', True),
    'person.quotes': ('quote', False),
    'person.keywords': ('item', True),
    'character.quotes': ('item', True),
    'character.quotes.item': ('quote', False),
    'character.quotes.item.quote': ('line', False)
}

_allchars = string.maketrans('', '')
# NOTE: despite the name, _keepchars holds the characters *deleted* from
# tag names: every char except ascii lowercase letters, digits and '-'
# (str.translate's second argument is the set of chars to remove).
_keepchars = _allchars.translate(_allchars, string.ascii_lowercase + '-' +
                                 string.digits)
def _tagAttr(key, fullpath):
    """Return a tuple with a tag name and a (possibly empty) attribute,
    applying the conversions specified in TAGS_TO_MODIFY and checking
    that the tag is safe for a XML document."""
    attrs = {}
    _escapedKey = escape4xml(key)
    if fullpath in TAGS_TO_MODIFY:
        # Known-unpredictable path: use the configured tag name and
        # optionally keep the original key as a 'key' attribute.
        tagName, useTitle = TAGS_TO_MODIFY[fullpath]
        if useTitle:
            attrs['key'] = _escapedKey
    elif not isinstance(key, unicode):
        if isinstance(key, str):
            tagName = unicode(key, 'ascii', 'ignore')
        else:
            # Non-string key: record its type in a 'keytype' attribute.
            strType = str(type(key)).replace("<type '", "").replace("'>", "")
            attrs['keytype'] = strType
            tagName = unicode(key)
    else:
        tagName = key
    if isinstance(key, int):
        attrs['keytype'] = 'int'
    origTagName = tagName
    # Normalize to a lowercase, dash-separated tag and strip every char
    # that is not [a-z0-9-].
    tagName = tagName.lower().replace(' ', '-')
    tagName = str(tagName).translate(_allchars, _keepchars)
    if origTagName != tagName:
        # The tag was mangled: keep the original key in an attribute.
        if 'key' not in attrs:
            attrs['key'] = _escapedKey
    if (not tagName) or tagName[0].isdigit() or tagName[0] == '-':
        # This is a fail-safe: we should never be here, since unpredictable
        # keys must be listed in TAGS_TO_MODIFY.
        # This will proably break the DTD/schema, but at least it will
        # produce a valid XML.
        tagName = 'item'
        _utils_logger.error('invalid tag: %s [%s]' % (_escapedKey, fullpath))
        attrs['key'] = _escapedKey
    return tagName, u' '.join([u'%s="%s"' % i for i in attrs.items()])
def _seq2xml(seq, _l=None, withRefs=False, modFunct=None,
             titlesRefs=None, namesRefs=None, charactersRefs=None,
             _topLevel=True, key2infoset=None, fullpath=''):
    """Convert a sequence or a dictionary to a list of XML
    unicode strings.

    The fragments are accumulated (in document order) into *_l*, which
    is also returned; *fullpath* is the dotted path of tag names used
    to look up TAGS_TO_MODIFY entries.
    """
    if _l is None:
        _l = []
    if isinstance(seq, dict):
        for key in seq:
            value = seq[key]
            if isinstance(key, _Container):
                # Here we're assuming that a _Container is never a top-level
                # key (otherwise we should handle key2infoset).
                openTag, closeTag = _tag4TON(key)
                # So that fullpath will contains something meaningful.
                tagName = key.__class__.__name__.lower()
            else:
                tagName, attrs = _tagAttr(key, fullpath)
                openTag = u'<%s' % tagName
                if attrs:
                    openTag += ' %s' % attrs
                if _topLevel and key2infoset and key in key2infoset:
                    openTag += u' infoset="%s"' % key2infoset[key]
                # Record the numeric type of the value, for round-tripping.
                if isinstance(value, int):
                    openTag += ' type="int"'
                elif isinstance(value, float):
                    openTag += ' type="float"'
                openTag += u'>'
                closeTag = u'</%s>' % tagName
            _l.append(openTag)
            _seq2xml(value, _l, withRefs, modFunct, titlesRefs,
                     namesRefs, charactersRefs, _topLevel=False,
                     fullpath='%s.%s' % (fullpath, tagName))
            _l.append(closeTag)
    elif isinstance(seq, (list, tuple)):
        tagName, attrs = _tagAttr('item', fullpath)
        beginTag = u'<%s' % tagName
        if attrs:
            beginTag += u' %s' % attrs
        #beginTag += u'>'
        closeTag = u'</%s>' % tagName
        for item in seq:
            if isinstance(item, _Container):
                # _Container items produce their own open/close tags.
                _seq2xml(item, _l, withRefs, modFunct, titlesRefs,
                         namesRefs, charactersRefs, _topLevel=False,
                         fullpath='%s.%s' % (fullpath,
                                             item.__class__.__name__.lower()))
            else:
                openTag = beginTag
                if isinstance(item, int):
                    openTag += ' type="int"'
                elif isinstance(item, float):
                    openTag += ' type="float"'
                openTag += u'>'
                _l.append(openTag)
                _seq2xml(item, _l, withRefs, modFunct, titlesRefs,
                         namesRefs, charactersRefs, _topLevel=False,
                         fullpath='%s.%s' % (fullpath, tagName))
                _l.append(closeTag)
    else:
        if isinstance(seq, _Container):
            _l.extend(_tag4TON(seq))
        else:
            # Text, ints, floats and the like.
            _l.append(_normalizeValue(seq, withRefs=withRefs,
                                      modFunct=modFunct,
                                      titlesRefs=titlesRefs,
                                      namesRefs=namesRefs,
                                      charactersRefs=charactersRefs))
    return _l
# XML prologue; '%s' is filled with the root tag name by asXML, while
# '{VERSION}' is replaced here with the (two-digit, dev-suffix-stripped)
# package version, to point at the matching DTD.
_xmlHead = u"""<?xml version="1.0"?>
<!DOCTYPE %s SYSTEM "http://imdbpy.sf.net/dtd/imdbpy{VERSION}.dtd">
"""
_xmlHead = _xmlHead.replace('{VERSION}',
                            VERSION.replace('.', '').split('dev')[0][:2])
class _Container(object):
    """Base class for Movie, Person, Character and Company classes.

    Provides the dictionary-like protocol (backed by self.data), the
    currentRole/roleID machinery and the XML serialization helpers.
    """
    # The default sets of information retrieved.
    default_info = ()
    # Aliases for some not-so-intuitive keys.
    keys_alias = {}
    # List of keys to modify.
    keys_tomodify_list = ()
    # Function used to compare two instances of this class.
    cmpFunct = None
    # Regular expression used to build the 'full-size (headshot|cover url)'.
    _re_fullsizeURL = re.compile(r'\._V1\._SX(\d+)_SY(\d+)_')

    def __init__(self, myID=None, data=None, notes=u'',
                 currentRole=u'', roleID=None, roleIsPerson=False,
                 accessSystem=None, titlesRefs=None, namesRefs=None,
                 charactersRefs=None, modFunct=None, *args, **kwds):
        """Initialize a Movie, Person, Character or Company object.

        *myID* -- your personal identifier for this object.
        *data* -- a dictionary used to initialize the object.
        *notes* -- notes for the person referred in the currentRole
                   attribute; e.g.: '(voice)' or the alias used in the
                   movie credits.
        *accessSystem* -- a string representing the data access system used.
        *currentRole* -- a Character instance representing the current role
                         or duty of a person in this movie, or a Person
                         object representing the actor/actress who played
                         a given character in a Movie.  If a string is
                         passed, an object is automatically build.
        *roleID* -- if available, the characterID/personID of the currentRole
                    object.
        *roleIsPerson* -- when False (default) the currentRole is assumed
                          to be a Character object, otherwise a Person.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function called returning text fields.
        """
        self.reset()
        self.accessSystem = accessSystem
        self.myID = myID
        if data is None: data = {}
        self.set_data(data, override=1)
        self.notes = notes
        if titlesRefs is None: titlesRefs = {}
        self.update_titlesRefs(titlesRefs)
        if namesRefs is None: namesRefs = {}
        self.update_namesRefs(namesRefs)
        if charactersRefs is None: charactersRefs = {}
        self.update_charactersRefs(charactersRefs)
        self.set_mod_funct(modFunct)
        self.keys_tomodify = {}
        for item in self.keys_tomodify_list:
            self.keys_tomodify[item] = None
        self._roleIsPerson = roleIsPerson
        # Imports are deferred to avoid circular imports between
        # imdb.utils and the Character/Person modules.
        if not roleIsPerson:
            from imdb.Character import Character
            self._roleClass = Character
        else:
            from imdb.Person import Person
            self._roleClass = Person
        self.currentRole = currentRole
        if roleID:
            self.roleID = roleID
        self._init(*args, **kwds)

    def _get_roleID(self):
        """Return the characterID or personID of the currentRole object."""
        if not self.__role:
            return None
        if isinstance(self.__role, list):
            return [x.getID() for x in self.__role]
        return self.currentRole.getID()

    def _set_roleID(self, roleID):
        """Set the characterID or personID of the currentRole object."""
        if not self.__role:
            # XXX: needed? Just ignore it? It's probably safer to
            # ignore it, to prevent some bugs in the parsers.
            #raise IMDbError,"Can't set ID of an empty Character/Person object."
            pass
        if not self._roleIsPerson:
            if not isinstance(roleID, (list, tuple)):
                self.currentRole.characterID = roleID
            else:
                for index, item in enumerate(roleID):
                    self.__role[index].characterID = item
        else:
            if not isinstance(roleID, (list, tuple)):
                self.currentRole.personID = roleID
            else:
                for index, item in enumerate(roleID):
                    self.__role[index].personID = item

    roleID = property(_get_roleID, _set_roleID,
                      doc="the characterID or personID of the currentRole object.")

    def _get_currentRole(self):
        """Return a Character or Person instance."""
        if self.__role:
            return self.__role
        # No role set: return a fresh, empty role object.
        return self._roleClass(name=u'', accessSystem=self.accessSystem,
                               modFunct=self.modFunct)

    def _set_currentRole(self, role):
        """Set self.currentRole to a Character or Person instance."""
        if isinstance(role, (unicode, str)):
            if not role:
                self.__role = None
            else:
                self.__role = self._roleClass(name=role, modFunct=self.modFunct,
                                              accessSystem=self.accessSystem)
        elif isinstance(role, (list, tuple)):
            # Multiple roles: store them in a RolesList, converting
            # plain strings to role objects on the fly.
            self.__role = RolesList()
            for item in role:
                if isinstance(item, (unicode, str)):
                    self.__role.append(self._roleClass(name=item,
                                                       accessSystem=self.accessSystem,
                                                       modFunct=self.modFunct))
                else:
                    self.__role.append(item)
            if not self.__role:
                self.__role = None
        else:
            self.__role = role

    currentRole = property(_get_currentRole, _set_currentRole,
                           doc="The role of a Person in a Movie" + \
                               " or the interpreter of a Character in a Movie.")

    def _init(self, **kwds): pass

    def reset(self):
        """Reset the object."""
        self.data = {}
        self.myID = None
        self.notes = u''
        self.titlesRefs = {}
        self.namesRefs = {}
        self.charactersRefs = {}
        self.modFunct = modClearRefs
        self.current_info = []
        self.infoset2keys = {}
        self.key2infoset = {}
        self.__role = None
        self._reset()

    def _reset(self): pass

    def clear(self):
        """Reset the dictionary."""
        # Unlike reset(), myID, accessSystem and modFunct are preserved.
        self.data.clear()
        self.notes = u''
        self.titlesRefs = {}
        self.namesRefs = {}
        self.charactersRefs = {}
        self.current_info = []
        self.infoset2keys = {}
        self.key2infoset = {}
        self.__role = None
        self._clear()

    def _clear(self): pass

    def get_current_info(self):
        """Return the current set of information retrieved."""
        return self.current_info

    def update_infoset_map(self, infoset, keys, mainInfoset):
        """Update the mappings between infoset and keys."""
        if keys is None:
            keys = []
        if mainInfoset is not None:
            theIS = mainInfoset
        else:
            theIS = infoset
        self.infoset2keys[theIS] = keys
        for key in keys:
            self.key2infoset[key] = theIS

    def set_current_info(self, ci):
        """Set the current set of information retrieved."""
        # XXX:Remove? It's never used and there's no way to update infoset2keys.
        self.current_info = ci

    def add_to_current_info(self, val, keys=None, mainInfoset=None):
        """Add a set of information to the current list."""
        if val not in self.current_info:
            self.current_info.append(val)
            self.update_infoset_map(val, keys, mainInfoset)

    def has_current_info(self, val):
        """Return true if the given set of information is in the list."""
        return val in self.current_info

    def set_mod_funct(self, modFunct):
        """Set the fuction used to modify the strings."""
        if modFunct is None: modFunct = modClearRefs
        self.modFunct = modFunct

    def update_titlesRefs(self, titlesRefs):
        """Update the dictionary with the references to movies."""
        self.titlesRefs.update(titlesRefs)

    def get_titlesRefs(self):
        """Return the dictionary with the references to movies."""
        return self.titlesRefs

    def update_namesRefs(self, namesRefs):
        """Update the dictionary with the references to names."""
        self.namesRefs.update(namesRefs)

    def get_namesRefs(self):
        """Return the dictionary with the references to names."""
        return self.namesRefs

    def update_charactersRefs(self, charactersRefs):
        """Update the dictionary with the references to characters."""
        self.charactersRefs.update(charactersRefs)

    def get_charactersRefs(self):
        """Return the dictionary with the references to characters."""
        return self.charactersRefs

    def set_data(self, data, override=0):
        """Set the movie data to the given dictionary; if 'override' is
        set, the previous data is removed, otherwise the two dictionary
        are merged.
        """
        if not override:
            self.data.update(data)
        else:
            self.data = data

    def getID(self):
        """Return movieID, personID, characterID or companyID."""
        raise NotImplementedError, 'override this method'

    def __cmp__(self, other):
        """Compare two Movie, Person, Character or Company objects."""
        # XXX: raise an exception?
        if self.cmpFunct is None: return -1
        if not isinstance(other, self.__class__): return -1
        return self.cmpFunct(other)

    def __hash__(self):
        """Hash for this object."""
        # XXX: does it always work correctly?
        theID = self.getID()
        if theID is not None and self.accessSystem not in ('UNKNOWN', None):
            # Handle 'http' and 'mobile' as they are the same access system.
            acs = self.accessSystem
            if acs in ('mobile', 'httpThin'):
                acs = 'http'
            # There must be some indication of the kind of the object, too.
            s4h = '%s:%s[%s]' % (self.__class__.__name__, theID, acs)
        else:
            s4h = repr(self)
        return hash(s4h)

    def isSame(self, other):
        """Return True if the two represent the same object."""
        if not isinstance(other, self.__class__): return 0
        if hash(self) == hash(other): return 1
        return 0

    def __len__(self):
        """Number of items in the data dictionary."""
        return len(self.data)

    def getAsXML(self, key, _with_add_keys=True):
        """Return a XML representation of the specified key, or None
        if empty.  If _with_add_keys is False, dinamically generated
        keys are excluded."""
        # Prevent modifyStrings in __getitem__ to be called; if needed,
        # it will be called by the _normalizeValue function.
        origModFunct = self.modFunct
        self.modFunct = modNull
        # XXX: not totally sure it's a good idea, but could prevent
        #      problems (i.e.: the returned string always contains
        #      a DTD valid tag, and not something that can be only in
        #      the keys_alias map).
        key = self.keys_alias.get(key, key)
        if (not _with_add_keys) and (key in self._additional_keys()):
            self.modFunct = origModFunct
            return None
        try:
            withRefs = False
            if key in self.keys_tomodify and \
                    origModFunct not in (None, modNull):
                withRefs = True
            value = self.get(key)
            if value is None:
                return None
            tag = self.__class__.__name__.lower()
            return u''.join(_seq2xml({key: value}, withRefs=withRefs,
                                     modFunct=origModFunct,
                                     titlesRefs=self.titlesRefs,
                                     namesRefs=self.namesRefs,
                                     charactersRefs=self.charactersRefs,
                                     key2infoset=self.key2infoset,
                                     fullpath=tag))
        finally:
            # Always restore the user-provided modFunct.
            self.modFunct = origModFunct

    def asXML(self, _with_add_keys=True):
        """Return a XML representation of the whole object.
        If _with_add_keys is False, dinamically generated keys are excluded."""
        beginTag, endTag = _tag4TON(self, addAccessSystem=True,
                                    _containerOnly=True)
        resList = [beginTag]
        for key in self.keys():
            value = self.getAsXML(key, _with_add_keys=_with_add_keys)
            if not value:
                continue
            resList.append(value)
        resList.append(endTag)
        head = _xmlHead % self.__class__.__name__.lower()
        return head + u''.join(resList)

    def _getitem(self, key):
        """Handle special keys."""
        return None

    def __getitem__(self, key):
        """Return the value for a given key, checking key aliases;
        a KeyError exception is raised if the key is not found.
        """
        value = self._getitem(key)
        if value is not None: return value
        # Handle key aliases.
        key = self.keys_alias.get(key, key)
        rawData = self.data[key]
        if key in self.keys_tomodify and \
                self.modFunct not in (None, modNull):
            try:
                return modifyStrings(rawData, self.modFunct, self.titlesRefs,
                                     self.namesRefs, self.charactersRefs)
            except RuntimeError, e:
                # Symbian/python 2.2 has a poor regexp implementation.
                import warnings
                warnings.warn('RuntimeError in '
                              "imdb.utils._Container.__getitem__; if it's not "
                              "a recursion limit exceeded and we're not running "
                              "in a Symbian environment, it's a bug:\n%s" % e)
        return rawData

    def __setitem__(self, key, item):
        """Directly store the item with the given key."""
        self.data[key] = item

    def __delitem__(self, key):
        """Remove the given section or key."""
        # XXX: how to remove an item of a section?
        del self.data[key]

    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        return []

    def keys(self):
        """Return a list of valid keys."""
        return self.data.keys() + self._additional_keys()

    def items(self):
        """Return the items in the dictionary."""
        return [(k, self.get(k)) for k in self.keys()]

    # XXX: is this enough?
    def iteritems(self): return self.data.iteritems()
    def iterkeys(self): return self.data.iterkeys()
    def itervalues(self): return self.data.itervalues()

    def values(self):
        """Return the values in the dictionary."""
        return [self.get(k) for k in self.keys()]

    def has_key(self, key):
        """Return true if a given section is defined."""
        try:
            self.__getitem__(key)
        except KeyError:
            return 0
        return 1

    # XXX: really useful???
    #      consider also that this will confuse people who meant to
    #      call ia.update(movieObject, 'data set') instead.
    def update(self, dict):
        self.data.update(dict)

    def get(self, key, failobj=None):
        """Return the given section, or default if it's not found."""
        try:
            return self.__getitem__(key)
        except KeyError:
            return failobj

    def setdefault(self, key, failobj=None):
        if not self.has_key(key):
            self[key] = failobj
        return self[key]

    def pop(self, key, *args):
        return self.data.pop(key, *args)

    def popitem(self):
        return self.data.popitem()

    def __repr__(self):
        """String representation of an object."""
        raise NotImplementedError, 'override this method'

    def __str__(self):
        """Movie title or person name."""
        raise NotImplementedError, 'override this method'

    def __contains__(self, key):
        raise NotImplementedError, 'override this method'

    def append_item(self, key, item):
        """The item is appended to the list identified by the given key."""
        self.data.setdefault(key, []).append(item)

    def set_item(self, key, item):
        """Directly store the item with the given key."""
        self.data[key] = item

    def __nonzero__(self):
        """Return true if self.data contains something."""
        if self.data: return 1
        return 0

    def __deepcopy__(self, memo):
        raise NotImplementedError, 'override this method'

    def copy(self):
        """Return a deep copy of the object itself."""
        return deepcopy(self)
def flatten(seq, toDescend=(list, dict, tuple), yieldDictKeys=0,
            onlyKeysType=(_Container,), scalar=None):
    """Iterate over nested lists and dictionaries; toDescend is a list
    or a tuple of types to be considered non-scalar; if yieldDictKeys is
    true, also dictionaries' keys are yielded; if scalar is not None, only
    items of the given type(s) are yielded."""
    if scalar is None or isinstance(seq, scalar):
        yield seq
    if isinstance(seq, toDescend):
        if isinstance(seq, (dict, _Container)):
            if yieldDictKeys:
                # Yield also the keys of the dictionary.
                for key in seq.iterkeys():
                    for k in flatten(key, toDescend=toDescend,
                                     yieldDictKeys=yieldDictKeys,
                                     onlyKeysType=onlyKeysType, scalar=scalar):
                        # Keys are filtered by onlyKeysType (by default,
                        # only _Container keys are yielded).
                        if onlyKeysType and isinstance(k, onlyKeysType):
                            yield k
            for value in seq.itervalues():
                for v in flatten(value, toDescend=toDescend,
                                 yieldDictKeys=yieldDictKeys,
                                 onlyKeysType=onlyKeysType, scalar=scalar):
                    yield v
        elif not isinstance(seq, (str, unicode, int, float)):
            # Any other non-scalar iterable (lists, tuples).
            for item in seq:
                for i in flatten(item, toDescend=toDescend,
                                 yieldDictKeys=yieldDictKeys,
                                 onlyKeysType=onlyKeysType, scalar=scalar):
                    yield i
"""
articles module (imdb package).
This module provides functions and data to handle in a smart way
articles (in various languages) at the beginning of movie titles.
Copyright 2009 Davide Alberani <da@erlug.linux.it>
2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language.  Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the cound of times they are used in a way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in utf-8 encoding:
# NOTE(review): the last items are utf-8 byte sequences for non-ascii
# articles (e.g. Greek/Cyrillic forms); they display as escapes here.
GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el',
                    "l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los',
                    'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os',
                    'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-',
                    'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines',
                    '\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9')
# Lists of articles separated by language.  If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# it at imdbpy-devel@lists.sourceforge.net; non-ascii articles must be utf-8
# encoded.
LANG_ARTICLES = {
    'English': ('the', 'a', 'an'),
    'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'",
                'uno'),
    'Spanish': ('la', 'le', 'el', 'les', 'un', 'los', 'una', 'uno', 'unos',
                'unas'),
    'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
    'Turkish': (), # Some languages doesn't have articles.
}
# Bound-method shortcut, used by the *ForLang helpers below.
LANG_ARTICLESget = LANG_ARTICLES.get
# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it at
# imdbpy-devel@lists.sourceforge.net .
_LANG_COUNTRIES = {
    'English': ('USA', 'UK', 'Canada', 'Ireland', 'Australia'),
    'Italian': ('Italy',),
    'Spanish': ('Spain', 'Mexico'),
    'Portuguese': ('Portugal', 'Brazil'),
    'Turkish': ('Turkey',),
    #'German': ('Germany', 'East Germany', 'West Germany'),
    #'French': ('France'),
}

# Maps countries to their main language (inverse of _LANG_COUNTRIES).
COUNTRY_LANG = {}
for lang in _LANG_COUNTRIES:
    for country in _LANG_COUNTRIES[lang]:
        COUNTRY_LANG[country] = lang
def toUnicode(articles):
    """Convert a list of articles utf-8 encoded to unicode strings."""
    decoded = []
    for art in articles:
        decoded.append(art.decode('utf_8'))
    return tuple(decoded)
def toDicts(articles):
    """Given a list of utf-8 encoded articles, build two dictionary (one
    utf-8 encoded and another one with unicode keys) for faster matches."""
    # Identity mappings (key == value) are enough: only membership and
    # lookups matter for the callers.
    encodedDict = {}
    for art in articles:
        encodedDict[art] = art
    unicodeDict = {}
    for art in toUnicode(articles):
        unicodeDict[art] = art
    return encodedDict, unicodeDict
def addTrailingSpace(articles):
    """From the given list of utf-8 encoded articles, return two
    lists (one utf-8 encoded and another one in unicode) where a space
    is added at the end - if the last char is not ' or -."""
    spacedArticles = []
    spacedUnicodeArticles = []
    for art in articles:
        # Articles ending with an apostrophe or a dash attach directly
        # to the following word, so no separator is appended.
        if art[-1] not in ("'", '-'):
            art = art + ' '
        spacedArticles.append(art)
        spacedUnicodeArticles.append(art.decode('utf_8'))
    return spacedArticles, spacedUnicodeArticles
# Per-language caches for the *ForLang helpers below (language name ->
# precomputed dictionaries/lists of articles).
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
    """Return dictionaries of articles specific for the given language, or the
    default one if the language is not known."""
    # EAFP: most calls hit the cache.
    try:
        return _ART_CACHE[lang]
    except KeyError:
        artDicts = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
        _ART_CACHE[lang] = artDicts
        return artDicts
def spArticlesForLang(lang):
    """Return lists of articles (plus optional spaces) specific for the
    given language, or the default one if the language is not known."""
    # EAFP: most calls hit the cache.
    try:
        return _SP_ART_CACHE[lang]
    except KeyError:
        spArticles = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
        _SP_ART_CACHE[lang] = spArticles
        return spArticles
| Python |
"""
helpers module (imdb package).
This module provides functions not used directly by the imdb package,
but useful for IMDbPY-based programs.
Copyright 2006-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# XXX: find better names for the functions in this modules.
import re
from cgi import escape
import gettext
from gettext import gettext as _
gettext.textdomain('imdbpy')
# The modClearRefs can be used to strip names and titles references from
# the strings in Movie and Person objects.
from imdb.utils import modClearRefs, re_titleRef, re_nameRef, \
re_characterRef, _tagAttr, _Container, TAGS_TO_MODIFY
from imdb import IMDb, imdbURL_movie_base, imdbURL_person_base, \
imdbURL_character_base
import imdb.locale
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.Character import Character
from imdb.Company import Company
from imdb.parser.http.utils import re_entcharrefssub, entcharrefs, \
subXMLRefs, subSGMLRefs
from imdb.parser.http.bsouplxml.etree import BeautifulSoup
# An URL, more or less.
_re_href = re.compile(r'(http://.+?)(?=\s|$)', re.I)
_re_hrefsub = _re_href.sub
def makeCgiPrintEncoding(encoding):
    """Make a function to pretty-print strings for the web.

    *encoding* -- the target character encoding (e.g. 'latin_1')."""
    def cgiPrint(s):
        """Encode the given string using the %s encoding, and replace
        chars outside the given charset with XML char references."""
        # NOTE: cgi.escape is deprecated/removed in modern Python;
        # kept here because this code base targets Python 2.
        s = escape(s, quote=1)
        if isinstance(s, unicode):
            s = s.encode(encoding, 'xmlcharrefreplace')
        return s
    # The original code used '"""...""" % encoding' as the first statement,
    # which is an expression - not a docstring - so __doc__ was None.
    # Interpolate the encoding into the real docstring instead.
    cgiPrint.__doc__ = cgiPrint.__doc__ % encoding
    return cgiPrint
# cgiPrint uses the latin_1 encoding.
cgiPrint = makeCgiPrintEncoding('latin_1')

# Regular expression for %(varname)s substitutions.
re_subst = re.compile(r'%\((.+?)\)s')
# Regular expression for <if condition>....</if condition> clauses;
# the \1 backreference forces the closing tag to name the same condition.
re_conditional = re.compile(r'<if\s+(.+?)\s*>(.+?)</if\s+\1\s*>')
def makeTextNotes(replaceTxtNotes):
    """Create a function useful to handle text[::optional_note] values.

    replaceTxtNotes is a format string, which can include the following
    values: %(text)s and %(notes)s.
    Portions of the text can be conditionally excluded, if one of the
    values is absent.  E.g.: <if notes>[%(notes)s]</if notes> will be
    replaced with '[notes]' if notes exists, or by an empty string otherwise.
    The returned function is suitable to be passed as the applyToValues
    argument of the makeObject2Txt function."""
    def _replacer(s):
        # Non-string values pass through untouched.
        if not isinstance(s, (unicode, str)):
            return s
        parts = s.split('::', 1)
        body = parts[0]
        # Track which of 'text'/'notes' are actually present, for the
        # conditional sections below.
        present = {}
        result = replaceTxtNotes
        if body:
            present['text'] = True
            result = result.replace('%(text)s', body)
        if len(parts) == 2:
            present['notes'] = True
            result = result.replace('%(notes)s', parts[1])
        else:
            result = result.replace('%(notes)s', u'')
        def _dropAbsentSections(matchobj):
            # Keep a conditional section only if its key is present.
            if matchobj.group(1) in present:
                return matchobj.group(2)
            return u''
        # Loop to also resolve nested <if ...> sections.
        while re_conditional.search(result):
            result = re_conditional.sub(_dropAbsentSections, result)
        return result
    return _replacer
def makeObject2Txt(movieTxt=None, personTxt=None, characterTxt=None,
                   companyTxt=None, joiner=' / ',
                   applyToValues=lambda x: x, _recurse=True):
    """Return a function useful to pretty-print Movie, Person,
    Character and Company instances.

    *movieTxt* -- how to format a Movie object.
    *personTxt* -- how to format a Person object.
    *characterTxt* -- how to format a Character object.
    *companyTxt* -- how to format a Company object.
    *joiner* -- string used to join a list of objects.
    *applyToValues* -- function to apply to values.
    *_recurse* -- if True (default) manage only the given object.
    """
    # (fixed: the docstring above opened with four quotes, so it began
    # with a stray '"' character)
    # Some useful defaults.
    if movieTxt is None:
        movieTxt = '%(long imdb title)s'
    if personTxt is None:
        personTxt = '%(long imdb name)s'
    if characterTxt is None:
        characterTxt = '%(long imdb name)s'
    if companyTxt is None:
        companyTxt = '%(long imdb name)s'
    def object2txt(obj, _limitRecursion=None):
        """Pretty-print objects."""
        # Prevent unlimited recursion.
        if _limitRecursion is None:
            _limitRecursion = 0
        elif _limitRecursion > 5:
            return u''
        _limitRecursion += 1
        if isinstance(obj, (list, tuple)):
            return joiner.join([object2txt(o, _limitRecursion=_limitRecursion)
                                for o in obj])
        elif isinstance(obj, dict):
            # XXX: not exactly nice, neither useful, I fear.
            return joiner.join([u'%s::%s' %
                        (object2txt(k, _limitRecursion=_limitRecursion),
                        object2txt(v, _limitRecursion=_limitRecursion))
                        for k, v in obj.items()])
        # objData is filled but never read; kept to avoid altering any
        # attribute-access side effect.  NOTE(review): candidate for removal.
        objData = {}
        if isinstance(obj, Movie):
            objData['movieID'] = obj.movieID
            outs = movieTxt
        elif isinstance(obj, Person):
            objData['personID'] = obj.personID
            outs = personTxt
        elif isinstance(obj, Character):
            objData['characterID'] = obj.characterID
            outs = characterTxt
        elif isinstance(obj, Company):
            objData['companyID'] = obj.companyID
            outs = companyTxt
        else:
            # Not a known container type: return it as-is.
            return obj
        def _excludeFalseConditionals(matchobj):
            # Return an empty string if the conditional is false/empty.
            # (fixed: an unreachable duplicated 'return matchobj.group(2)'
            # after this if/else was removed)
            condition = matchobj.group(1)
            proceed = obj.get(condition) or getattr(obj, condition, None)
            if proceed:
                return matchobj.group(2)
            else:
                return u''
        while re_conditional.search(outs):
            outs = re_conditional.sub(_excludeFalseConditionals, outs)
        for key in re_subst.findall(outs):
            value = obj.get(key) or getattr(obj, key, None)
            if not isinstance(value, (unicode, str)):
                if not _recurse:
                    if value:
                        value = unicode(value)
                if value:
                    # Recursively pretty-print nested objects.
                    value = object2txt(value, _limitRecursion=_limitRecursion)
            elif value:
                value = applyToValues(unicode(value))
            if not value:
                value = u''
            elif not isinstance(value, (unicode, str)):
                value = unicode(value)
            outs = outs.replace(u'%(' + key + u')s', value)
        return outs
    return object2txt
def makeModCGILinks(movieTxt, personTxt, characterTxt=None,
                    encoding='latin_1'):
    """Make a function used to pretty-print movies and persons references;
    movieTxt and personTxt are the strings used for the substitutions.
    movieTxt must contains %(movieID)s and %(title)s, while personTxt
    must contains %(personID)s and %(name)s and characterTxt %(characterID)s
    and %(name)s; characterTxt is optional, for backward compatibility."""
    _cgiPrint = makeCgiPrintEncoding(encoding)
    def modCGILinks(s, titlesRefs, namesRefs, characterRefs=None):
        """Substitute movies and persons references."""
        if characterRefs is None: characterRefs = {}
        # XXX: look ma'... more nested scopes! <g>
        def _replaceMovie(match):
            to_replace = match.group(1)
            item = titlesRefs.get(to_replace)
            if item:
                movieID = item.movieID
                to_replace = movieTxt % {'movieID': movieID,
                                        'title': unicode(_cgiPrint(to_replace),
                                                        encoding,
                                                        'xmlcharrefreplace')}
            return to_replace
        def _replacePerson(match):
            to_replace = match.group(1)
            item = namesRefs.get(to_replace)
            if item:
                personID = item.personID
                to_replace = personTxt % {'personID': personID,
                                        'name': unicode(_cgiPrint(to_replace),
                                                        encoding,
                                                        'xmlcharrefreplace')}
            return to_replace
        def _replaceCharacter(match):
            to_replace = match.group(1)
            if characterTxt is None:
                # Backward compatibility: leave the reference as-is.
                return to_replace
            item = characterRefs.get(to_replace)
            if item:
                characterID = item.characterID
                if characterID is None:
                    return to_replace
                to_replace = characterTxt % {'characterID': characterID,
                                        'name': unicode(_cgiPrint(to_replace),
                                                        encoding,
                                                        'xmlcharrefreplace')}
            return to_replace
        # Escape raw markup before inserting our own anchors.
        # (fixed: these replacements had decayed into the no-ops
        # replace('<', '<') / replace('>', '>'))
        s = s.replace('<', '&lt;').replace('>', '&gt;')
        s = _re_hrefsub(r'<a href="\1">\1</a>', s)
        s = re_titleRef.sub(_replaceMovie, s)
        s = re_nameRef.sub(_replacePerson, s)
        s = re_characterRef.sub(_replaceCharacter, s)
        return s
    modCGILinks.movieTxt = movieTxt
    modCGILinks.personTxt = personTxt
    modCGILinks.characterTxt = characterTxt
    return modCGILinks
# links to the imdb.com web site.
_movieTxt = '<a href="' + imdbURL_movie_base + 'tt%(movieID)s">%(title)s</a>'
_personTxt = '<a href="' + imdbURL_person_base + 'nm%(personID)s">%(name)s</a>'
_characterTxt = '<a href="' + imdbURL_character_base + \
                'ch%(characterID)s">%(name)s</a>'
# Ready-made reference substituters pointing at imdb.com.
modHtmlLinks = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt,
                                characterTxt=_characterTxt)
modHtmlLinksASCII = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt,
                                    characterTxt=_characterTxt,
                                    encoding='ascii')

# Every known entity/char reference: the base table from
# imdb.parser.http.utils plus the XML predefined entities, each also
# registered under its numeric ('#NNN') form.
everyentcharrefs = entcharrefs.copy()
for k, v in {'lt':u'<','gt':u'>','amp':u'&','quot':u'"','apos':u'\''}.items():
    everyentcharrefs[k] = v
    everyentcharrefs['#%s' % ord(v)] = v
everyentcharrefsget = everyentcharrefs.get
# Matches any known named reference, &#160;, or any numeric reference.
re_everyentcharrefs = re.compile('&(%s|\#160|\#\d{1,5});' %
                                 '|'.join(map(re.escape, everyentcharrefs)))
re_everyentcharrefssub = re_everyentcharrefs.sub
def _replAllXMLRef(match):
    """Return the replacement text for a matched XML/HTML/SGML
    entity or char reference (used as a re.sub callback)."""
    ref = match.group(1)
    known = everyentcharrefsget(ref)
    if known is not None:
        return known
    # Unknown numeric references are decoded by code point;
    # unknown named references are left as they are.
    if ref[0] == '#':
        return unichr(int(ref[1:]))
    return ref
def subXMLHTMLSGMLRefs(s):
    """Return the given string with every XML/HTML/SGML entity and
    char reference replaced by the character it stands for."""
    return re_everyentcharrefs.sub(_replAllXMLRef, s)
def sortedSeasons(m):
    """Return a sorted list of seasons of the given series.

    *m* -- a Movie instance (or mapping) with an 'episodes' key,
           mapping season numbers to episode dictionaries."""
    # sorted() over the mapping iterates its keys; this replaces the
    # Python2-only keys()+list.sort() idiom with identical results.
    return sorted(m.get('episodes', {}))
def sortedEpisodes(m, season=None):
    """Return a sorted list of episodes of the given series,
    considering only the specified season(s) (every season, if None).

    *m* -- a Movie instance (or mapping) with an 'episodes' key.
    *season* -- a single season number, a list/tuple of them, or None."""
    episodes = []
    if season is None:
        seasons = sortedSeasons(m)
    elif isinstance(season, (tuple, list)):
        seasons = season
    else:
        seasons = [season]
    allEpisodes = m.get('episodes', {})
    for s in seasons:
        seasonEps = allEpisodes.get(s, {})
        # sorted() replaces the Python2-only keys()+sort() idiom.
        for e in sorted(seasonEps):
            episodes.append(seasonEps[e])
    return episodes
# Idea and portions of the code courtesy of none none (dclist at gmail.com)
_re_imdbIDurl = re.compile(r'\b(nm|tt|ch|co)([0-9]{7})\b')
def get_byURL(url, info=None, args=None, kwds=None):
    """Return a Movie, Person, Character or Company object for the given URL;
    info is the info set to retrieve, args and kwds are respectively a list
    and a dictionary or arguments to initialize the data access system.
    Returns None if unable to correctly parse the url; can raise
    exceptions if unable to retrieve the data."""
    if args is None: args = []
    if kwds is None: kwds = {}
    # The access system is created even when the URL does not parse,
    # mirroring the original call order.
    ia = IMDb(*args, **kwds)
    match = _re_imdbIDurl.search(url)
    if not match:
        return None
    # Dispatch on the two-letter imdbID prefix.
    getters = {'tt': ia.get_movie,
               'nm': ia.get_person,
               'ch': ia.get_character,
               'co': ia.get_company}
    getter = getters.get(match.group(1))
    if getter is None:
        return None
    return getter(match.group(2), info=info)
# Idea and portions of code courtesy of Basil Shubin.
# Beware that these information are now available directly by
# the Movie/Person/Character instances.
def fullSizeCoverURL(obj):
    """Given an URL string or a Movie, Person or Character instance,
    returns an URL to the full-size version of the cover/headshot,
    or None otherwise.  This function is obsolete: the same information
    are available as keys: 'full-size cover url' and 'full-size headshot',
    respectively for movies and persons/characters."""
    if isinstance(obj, Movie):
        url = obj.get('cover url')
    elif isinstance(obj, (Person, Character)):
        url = obj.get('headshot')
    else:
        # Assume a plain URL string was passed in.
        url = obj
    if not url:
        return None
    # Strip the thumbnail-size suffix to obtain the full-size URL.
    return _Container._re_fullsizeURL.sub('', url)
def keyToXML(key):
    """Return a key (the ones used to access information in Movie and
    other classes instances) converted to the style of the XML output."""
    # _tagAttr returns an indexable pair; element 0 is the tag name
    # (presumably (tagName, attributes) - confirm in imdb.utils).
    return _tagAttr(key, '')[0]
def translateKey(key):
    """Translate a given key."""
    # Look up the gettext translation of the XML form of the key.
    return _(keyToXML(key))
# Maps tags to classes.
_MAP_TOP_OBJ = {
    'person': Person,
    'movie': Movie,
    'character': Character,
    'company': Company
}
# Tags to be converted to lists (used as a set: only key membership
# matters, the values are placeholders).
_TAGS_TO_LIST = dict([(x[0], None) for x in TAGS_TO_MODIFY.values()])
_TAGS_TO_LIST.update(_MAP_TOP_OBJ)
def tagToKey(tag):
    """Return the key naming this tag: the value of its 'key'
    attribute if present (converted to int when the 'keytype'
    attribute is 'int'), the tag name otherwise."""
    explicitKey = tag.get('key')
    if not explicitKey:
        return tag.name
    if tag.get('keytype') == 'int':
        return int(explicitKey)
    return explicitKey
def _valueWithType(tag, tagValue):
"""Return tagValue, handling some type conversions."""
tagType = tag.get('type')
if tagType == 'int':
tagValue = int(tagValue)
elif tagType == 'float':
tagValue = float(tagValue)
return tagValue
# Extra tags to get (if values were not already read from title/name).
_titleTags = ('imdbindex', 'kind', 'year')
_nameTags = ('imdbindex')
_companyTags = ('imdbindex', 'country')
def parseTags(tag, _topLevel=True, _as=None, _infoset2keys=None,
                _key2infoset=None):
    """Recursively parse a tree of tags.

    *tag* -- a BeautifulSoup tag (the root of the subtree to parse).
    *_topLevel* -- True only for the outermost call.
    *_as* -- access system propagated to nested _Container objects.
    *_infoset2keys*, *_key2infoset* -- accumulators mapping info sets
    to keys and vice versa, shared across the whole recursion."""
    # The returned object (usually a _Container subclass, but it can
    # be a string, an int, a float, a list or a dictionary).
    item = None
    if _infoset2keys is None:
        _infoset2keys = {}
    if _key2infoset is None:
        _key2infoset = {}
    name = tagToKey(tag)
    firstChild = tag.find(recursive=False)
    tagStr = (tag.string or u'').strip()
    if not tagStr and name == 'item':
        # Handles 'item' tags containing text and a 'notes' sub-tag.
        tagContent = tag.contents[0]
        if isinstance(tagContent, BeautifulSoup.NavigableString):
            tagStr = (unicode(tagContent) or u'').strip()
    # NOTE(review): tagType is assigned but never used below.
    tagType = tag.get('type')
    infoset = tag.get('infoset')
    if infoset:
        _key2infoset[name] = infoset
        _infoset2keys.setdefault(infoset, []).append(name)
    # Here we use tag.name to avoid tags like <item title="company">
    if tag.name in _MAP_TOP_OBJ:
        # One of the subclasses of _Container.
        # NOTE(review): the lookup uses 'name' (which honors a 'key'
        # attribute) while the test used tag.name; assumes top-level
        # movie/person/character/company tags carry no 'key' attribute.
        item = _MAP_TOP_OBJ[name]()
        itemAs = tag.get('access-system')
        if itemAs:
            if not _as:
                _as = itemAs
        else:
            itemAs = _as
        item.accessSystem = itemAs
        tagsToGet = []
        theID = tag.get('id')
        if name == 'movie':
            item.movieID = theID
            tagsToGet = _titleTags
            # NOTE(review): theTitle is assigned but the code below
            # uses tag.title instead.
            theTitle = tag.find('title', recursive=False)
            if tag.title:
                item.set_title(tag.title.string)
                tag.title.extract()
        else:
            if name == 'person':
                item.personID = theID
                tagsToGet = _nameTags
                theName = tag.find('long imdb canonical name',
                                    recursive=False)
                if not theName:
                    theName = tag.find('name', recursive=False)
            elif name == 'character':
                item.characterID = theID
                tagsToGet = _nameTags
                theName = tag.find('name', recursive=False)
            elif name == 'company':
                item.companyID = theID
                tagsToGet = _companyTags
                theName = tag.find('name', recursive=False)
            if theName:
                item.set_name(theName.string)
            if theName:
                # Remove the consumed tag from the tree.
                theName.extract()
        # Pick up extra data tags (imdbindex, kind, ...) not already set.
        for t in tagsToGet:
            if t in item.data:
                continue
            dataTag = tag.find(t, recursive=False)
            if dataTag:
                item.data[tagToKey(dataTag)] = _valueWithType(dataTag,
                                                            dataTag.string)
        if tag.notes:
            item.notes = tag.notes.string
            tag.notes.extract()
        episodeOf = tag.find('episode-of', recursive=False)
        if episodeOf:
            item.data['episode of'] = parseTags(episodeOf, _topLevel=False,
                                        _as=_as, _infoset2keys=_infoset2keys,
                                        _key2infoset=_key2infoset)
            episodeOf.extract()
        cRole = tag.find('current-role', recursive=False)
        if cRole:
            cr = parseTags(cRole, _topLevel=False, _as=_as,
                        _infoset2keys=_infoset2keys, _key2infoset=_key2infoset)
            item.currentRole = cr
            cRole.extract()
        # XXX: big assumption, here. What about Movie instances used
        # as keys in dictionaries? What about other keys (season and
        # episode number, for example?)
        if not _topLevel:
            #tag.extract()
            return item
        _adder = lambda key, value: item.data.update({key: value})
    elif tagStr:
        if tag.notes:
            # Append the notes with the standard '::' separator.
            notes = (tag.notes.string or u'').strip()
            if notes:
                tagStr += u'::%s' % notes
        else:
            # Only type-convert when there are no notes.
            tagStr = _valueWithType(tag, tagStr)
        return tagStr
    elif firstChild:
        firstChildName = tagToKey(firstChild)
        if firstChildName in _TAGS_TO_LIST:
            item = []
            _adder = lambda key, value: item.append(value)
        else:
            item = {}
            _adder = lambda key, value: item.update({key: value})
    else:
        # No text and no children; _adder is defined for uniformity but
        # the loop below will have nothing to add.
        item = {}
        _adder = lambda key, value: item.update({name: value})
    for subTag in tag(recursive=False):
        subTagKey = tagToKey(subTag)
        # Exclude dinamically generated keys.
        if tag.name in _MAP_TOP_OBJ and subTagKey in item._additional_keys():
            continue
        subItem = parseTags(subTag, _topLevel=False, _as=_as,
                    _infoset2keys=_infoset2keys, _key2infoset=_key2infoset)
        if subItem:
            _adder(subTagKey, subItem)
    if _topLevel and name in _MAP_TOP_OBJ:
        # Add information about 'info sets', but only to the top-level object.
        item.infoset2keys = _infoset2keys
        item.key2infoset = _key2infoset
        item.current_info = _infoset2keys.keys()
    return item
def parseXML(xml):
    """Parse a XML string, returning an appropriate object (usually an
    instance of a subclass of _Container); None on failure."""
    soup = BeautifulSoup.BeautifulStoneSoup(xml,
            convertEntities=BeautifulSoup.BeautifulStoneSoup.XHTML_ENTITIES)
    if not soup:
        return None
    # Parse starting from the first (root) tag, if any.
    rootTag = soup.find()
    if not rootTag:
        return None
    return parseTags(rootTag)
| Python |
"""
_compat module (imdb package).
This module provides compatibility functions used by the imdb package
to deal with unusual environments.
Copyright 2008-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# TODO: now we're heavily using the 'logging' module, which was not
# present in Python 2.2. To work in a Symbian environment, we
# need to create a fake 'logging' module (its functions may call
# the 'warnings' module, or do nothing at all).
import os
# If true, we're working on a Symbian device.
if os.name == 'e32':
    # Replace os.path.expandvars and os.path.expanduser, if needed.
    def _noact(x):
        """Ad-hoc replacement for IMDbPY."""
        # Identity function: these paths are returned untouched.
        return x
    try:
        os.path.expandvars
    except AttributeError:
        os.path.expandvars = _noact
    try:
        os.path.expanduser
    except AttributeError:
        os.path.expanduser = _noact
    # time.strptime is missing, on Symbian devices.
    import time
    try:
        time.strptime
    except AttributeError:
        import re
        # Matches 'Episode dated DD Month YYYY' titles.
        _re_web_time = re.compile(r'Episode dated (\d+) (\w+) (\d+)')
        # Matches '(YYYY-MM-DD)' dates.
        _re_ptdf_time = re.compile(r'\((\d+)-(\d+)-(\d+)\)')
        _month2digit = {'January': '1', 'February': '2', 'March': '3',
                        'April': '4', 'May': '5', 'June': '6', 'July': '7',
                        'August': '8', 'September': '9', 'October': '10',
                        'November': '11', 'December': '12'}
        def strptime(s, format):
            """Ad-hoc strptime replacement for IMDbPY."""
            # Returns a 9-item tuple shaped like time.struct_time;
            # only year/month/day are meaningful here.
            try:
                if format.startswith('Episode'):
                    res = _re_web_time.findall(s)[0]
                    return (int(res[2]), int(_month2digit[res[1]]), int(res[0]),
                            0, 0, 0, 0, 1, 0)
                else:
                    res = _re_ptdf_time.findall(s)[0]
                    return (int(res[0]), int(res[1]), int(res[2]),
                            0, 0, 0, 0, 1, 0)
            # Bare except kept on purpose: any parsing failure becomes
            # a ValueError, mimicking the real time.strptime.
            except:
                raise ValueError, u'error in IMDbPY\'s ad-hoc strptime!'
        time.strptime = strptime
| Python |
"""
imdb package.
This package can be used to retrieve information about a movie or
a person from the IMDb database.
It can fetch data through different media (e.g.: the IMDb web pages,
a SQL database, etc.)
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__all__ = ['IMDb', 'IMDbError', 'Movie', 'Person', 'Character', 'Company',
'available_access_systems']
__version__ = VERSION = '4.7'
# Import compatibility module (importing it is enough).
import _compat
import sys, os, ConfigParser, logging
from types import MethodType
from imdb import Movie, Person, Character, Company
import imdb._logging
from imdb._exceptions import IMDbError, IMDbDataAccessError
from imdb.utils import build_title, build_name, build_company_name
_aux_logger = logging.getLogger('imdbpy.aux')
# URLs of the main pages for movies, persons, characters and queries.
imdbURL_base = 'http://akas.imdb.com/'
# http://akas.imdb.com/title/
imdbURL_movie_base = '%stitle/' % imdbURL_base
# http://akas.imdb.com/title/tt%s/
imdbURL_movie_main = imdbURL_movie_base + 'tt%s/'
# http://akas.imdb.com/name/
imdbURL_person_base = '%sname/' % imdbURL_base
# http://akas.imdb.com/name/nm%s/
imdbURL_person_main = imdbURL_person_base + 'nm%s/'
# http://akas.imdb.com/character/
imdbURL_character_base = '%scharacter/' % imdbURL_base
# http://akas.imdb.com/character/ch%s/
imdbURL_character_main = imdbURL_character_base + 'ch%s/'
# http://akas.imdb.com/company/
imdbURL_company_base = '%scompany/' % imdbURL_base
# http://akas.imdb.com/company/co%s/
imdbURL_company_main = imdbURL_company_base + 'co%s/'
# http://akas.imdb.com/keyword/%s/
imdbURL_keyword_main = imdbURL_base + 'keyword/%s/'
# http://akas.imdb.com/chart/top
imdbURL_top250 = imdbURL_base + 'chart/top'
# http://akas.imdb.com/chart/bottom
imdbURL_bottom100 = imdbURL_base + 'chart/bottom'
# http://akas.imdb.com/find?%s
imdbURL_find = imdbURL_base + 'find?%s'
# Name of the configuration file.
confFileName = 'imdbpy.cfg'
class ConfigParserWithCase(ConfigParser.ConfigParser):
    """A case-sensitive parser for configuration files."""
    def __init__(self, defaults=None, confFile=None, *args, **kwds):
        """Initialize the parser.

        *defaults* -- defaults values.
        *confFile* -- the file (or list of files) to parse;
                      when None, a list of standard locations is tried."""
        ConfigParser.ConfigParser.__init__(self, defaults=defaults)
        if confFile is None:
            dotFileName = '.' + confFileName
            # Current and home directory.
            confFile = [os.path.join(os.getcwd(), confFileName),
                        os.path.join(os.getcwd(), dotFileName),
                        os.path.join(os.path.expanduser('~'), confFileName),
                        os.path.join(os.path.expanduser('~'), dotFileName)]
            if os.name == 'posix':
                sep = getattr(os.path, 'sep', '/')
                # /etc/ and /etc/conf.d/
                confFile.append(os.path.join(sep, 'etc', confFileName))
                confFile.append(os.path.join(sep, 'etc', 'conf.d',
                                            confFileName))
            else:
                # etc subdirectory of sys.prefix, for non-unix systems.
                confFile.append(os.path.join(sys.prefix, 'etc', confFileName))
        # NOTE(review): a single file name (string) passed as confFile
        # would be iterated character by character - callers should pass
        # a list; confirm against documented usage.
        for fname in confFile:
            try:
                self.read(fname)
            except (ConfigParser.MissingSectionHeaderError,
                    ConfigParser.ParsingError), e:
                _aux_logger.warn('Troubles reading config file: %s' % e)
            # Stop at the first valid file.
            if self.has_section('imdbpy'):
                break
    def optionxform(self, optionstr):
        """Option names are case sensitive."""
        # Overrides the base class' lower-casing of option names.
        return optionstr
    def _manageValue(self, value):
        """Custom substitutions for values."""
        # Map boolean-like strings to booleans and 'none' to None;
        # everything else is returned unchanged.
        if not isinstance(value, (str, unicode)):
            return value
        vlower = value.lower()
        if vlower in self._boolean_states:
            return self._boolean_states[vlower]
        elif vlower == 'none':
            return None
        return value
    def get(self, section, option, *args, **kwds):
        """Return the value of an option from a given section."""
        value = ConfigParser.ConfigParser.get(self, section, option,
                                                *args, **kwds)
        return self._manageValue(value)
    def items(self, section, *args, **kwds):
        """Return a list of (key, value) tuples of items of the
        given section."""
        if section != 'DEFAULT' and not self.has_section(section):
            return []
        keys = ConfigParser.ConfigParser.options(self, section)
        return [(k, self.get(section, k, *args, **kwds)) for k in keys]
    def getDict(self, section):
        """Return a dictionary of items of the specified section."""
        return dict(self.items(section))
def IMDb(accessSystem=None, *arguments, **keywords):
    """Return an instance of the appropriate class.
    The accessSystem parameter is used to specify the kind of
    the preferred access system.

    When accessSystem is None/'auto'/'config', the access system and
    extra keyword arguments are read from the imdbpy.cfg file (falling
    back to 'http' on any error)."""
    if accessSystem is None or accessSystem in ('auto', 'config'):
        try:
            cfg_file = ConfigParserWithCase(*arguments, **keywords)
            # Parameters set by the code take precedence.
            kwds = cfg_file.getDict('imdbpy')
            if 'accessSystem' in kwds:
                accessSystem = kwds['accessSystem']
                del kwds['accessSystem']
            else:
                accessSystem = 'http'
            kwds.update(keywords)
            keywords = kwds
        except Exception, e:
            logging.getLogger('imdbpy').warn('Unable to read configuration' \
                                            ' file; complete error: %s' % e)
            # It just LOOKS LIKE a bad habit: we tried to read config
            # options from some files, but something is gone horribly
            # wrong: ignore everything and pretend we were called with
            # the 'http' accessSystem.
            accessSystem = 'http'
    # Logging options are consumed here and not passed downstream.
    if 'loggingLevel' in keywords:
        imdb._logging.setLevel(keywords['loggingLevel'])
        del keywords['loggingLevel']
    if 'loggingConfig' in keywords:
        logCfg = keywords['loggingConfig']
        del keywords['loggingConfig']
        try:
            import logging.config
            logging.config.fileConfig(os.path.expanduser(logCfg))
        except Exception, e:
            logging.getLogger('imdbpy').warn('unable to read logger ' \
                                            'config: %s' % e)
    if accessSystem in ('http', 'web', 'html'):
        from parser.http import IMDbHTTPAccessSystem
        return IMDbHTTPAccessSystem(*arguments, **keywords)
    elif accessSystem in ('httpThin', 'webThin', 'htmlThin'):
        import logging
        logging.warn('httpThin is badly broken and' \
                    ' will not be fixed; please switch' \
                    ' to "http" or "mobile"')
        from parser.http import IMDbHTTPAccessSystem
        return IMDbHTTPAccessSystem(isThin=1, *arguments, **keywords)
    elif accessSystem in ('mobile',):
        from parser.mobile import IMDbMobileAccessSystem
        return IMDbMobileAccessSystem(*arguments, **keywords)
    elif accessSystem in ('local', 'files'):
        # The local access system was removed since IMDbPY 4.2.
        raise IMDbError, 'the local access system was removed since IMDbPY 4.2'
    elif accessSystem in ('sql', 'db', 'database'):
        try:
            from parser.sql import IMDbSqlAccessSystem
        except ImportError:
            raise IMDbError, 'the sql access system is not installed'
        return IMDbSqlAccessSystem(*arguments, **keywords)
    else:
        raise IMDbError, 'unknown kind of data access system: "%s"' \
                % accessSystem
def available_access_systems():
    """Return the list of available data access systems."""
    systems = []
    # XXX: trying to import modules is a good thing?
    try:
        from parser.http import IMDbHTTPAccessSystem
    except ImportError:
        pass
    else:
        systems.extend(['http', 'httpThin'])
    try:
        from parser.mobile import IMDbMobileAccessSystem
    except ImportError:
        pass
    else:
        systems.append('mobile')
    try:
        from parser.sql import IMDbSqlAccessSystem
    except ImportError:
        pass
    else:
        systems.append('sql')
    return systems
# XXX: I'm not sure this is a good guess.
# I suppose that an argument of the IMDb function can be used to
# set a default encoding for the output, and then Movie, Person and
# Character objects can use this default encoding, returning strings.
# Anyway, passing unicode strings to search_movie(), search_person()
# and search_character() methods is always safer.
encoding = getattr(sys.stdin, 'encoding', '') or sys.getdefaultencoding()
class IMDbBase:
"""The base class used to search for a movie/person/character and
to get a Movie/Person/Character object.
This class cannot directly fetch data of any kind and so you
have to search the "real" code into a subclass."""
# The name of the preferred access system (MUST be overridden
# in the subclasses).
accessSystem = 'UNKNOWN'
# Top-level logger for IMDbPY.
_imdb_logger = logging.getLogger('imdbpy')
def __init__(self, defaultModFunct=None, results=20, keywordsResults=100,
*arguments, **keywords):
"""Initialize the access system.
If specified, defaultModFunct is the function used by
default by the Person, Movie and Character objects, when
accessing their text fields.
"""
# The function used to output the strings that need modification (the
# ones containing references to movie titles and person names).
self._defModFunct = defaultModFunct
# Number of results to get.
try:
results = int(results)
except (TypeError, ValueError):
results = 20
if results < 1:
results = 20
self._results = results
try:
keywordsResults = int(keywordsResults)
except (TypeError, ValueError):
keywordsResults = 100
if keywordsResults < 1:
keywordsResults = 100
self._keywordsResults = keywordsResults
    def _normalize_movieID(self, movieID):
        """Normalize the given movieID.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return movieID
    def _normalize_personID(self, personID):
        """Normalize the given personID.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return personID
    def _normalize_characterID(self, characterID):
        """Normalize the given characterID.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return characterID
    def _normalize_companyID(self, companyID):
        """Normalize the given companyID.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return companyID
    def _get_real_movieID(self, movieID):
        """Handle title aliases.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return movieID
    def _get_real_personID(self, personID):
        """Handle name aliases.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return personID
    def _get_real_characterID(self, characterID):
        """Handle character name aliases.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return characterID
    def _get_real_companyID(self, companyID):
        """Handle company name aliases.

        Identity by default; subclasses can override."""
        # By default, do nothing.
        return companyID
def _get_infoset(self, prefname):
"""Return methods with the name starting with prefname."""
infoset = []
excludes = ('%sinfoset' % prefname,)
preflen = len(prefname)
for name in dir(self.__class__):
if name.startswith(prefname) and name not in excludes:
member = getattr(self.__class__, name)
if isinstance(member, MethodType):
infoset.append(name[preflen:].replace('_', ' '))
return infoset
    def get_movie_infoset(self):
        """Return the list of info set available for movies."""
        return self._get_infoset('get_movie_')
    def get_person_infoset(self):
        """Return the list of info set available for persons."""
        return self._get_infoset('get_person_')
    def get_character_infoset(self):
        """Return the list of info set available for characters."""
        return self._get_infoset('get_character_')
    def get_company_infoset(self):
        """Return the list of info set available for companies."""
        return self._get_infoset('get_company_')
    def get_movie(self, movieID, info=Movie.Movie.default_info, modFunct=None):
        """Return a Movie object for the given movieID.

        The movieID is something used to univocally identify a movie;
        it can be the imdbID used by the IMDb web server, a file
        pointer, a line number in a file, an ID in a database, etc.

        info is the list of sets of information to retrieve.

        If specified, modFunct will be the function used by the Movie
        object when accessing its text fields (like 'plot')."""
        movieID = self._normalize_movieID(movieID)
        movieID = self._get_real_movieID(movieID)
        movie = Movie.Movie(movieID=movieID, accessSystem=self.accessSystem)
        modFunct = modFunct or self._defModFunct
        if modFunct is not None:
            movie.set_mod_funct(modFunct)
        # Fetch the requested info sets into the new object.
        self.update(movie, info)
        return movie
    # Episodes are Movie objects too: same retrieval logic.
    get_episode = get_movie
    def _search_movie(self, title, results):
        """Return a list of tuples (movieID, {movieData}).

        Must be overridden by concrete access systems."""
        # XXX: for the real implementation, see the method of the
        # subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'
def search_movie(self, title, results=None, _episodes=False):
"""Return a list of Movie objects for a query for the given title.
The results argument is the maximum number of results to return."""
if results is None:
results = self._results
try:
results = int(results)
except (ValueError, OverflowError):
results = 20
# XXX: I suppose it will be much safer if the user provides
# an unicode string... this is just a guess.
if not isinstance(title, unicode):
title = unicode(title, encoding, 'replace')
if not _episodes:
res = self._search_movie(title, results)
else:
res = self._search_episode(title, results)
return [Movie.Movie(movieID=self._get_real_movieID(mi),
data=md, modFunct=self._defModFunct,
accessSystem=self.accessSystem) for mi, md in res][:results]
    def _search_episode(self, title, results):
        """Return a list of tuples (movieID, {movieData}).

        Must be overridden by concrete access systems."""
        # XXX: for the real implementation, see the method of the
        # subclass, somewhere under the imdb.parser package.
        raise NotImplementedError, 'override this method'
    def search_episode(self, title, results=None):
        """Return a list of Movie objects for a query for the given title.
        The results argument is the maximum number of results to return;
        this method searches only for titles of tv (mini) series' episodes."""
        # Delegate to search_movie, restricted to episode titles.
        return self.search_movie(title, results=results, _episodes=True)
def get_person(self, personID, info=Person.Person.default_info,
               modFunct=None):
    """Return a Person object for the given personID.

    The personID is whatever univocally identifies a person for the
    current data access system (the imdbID used by the IMDb web
    server, a file pointer, a line number in a file, an ID in a
    database, ...).
    info is the list of sets of information to retrieve.
    If specified, modFunct will be the function used by the Person
    object when accessing its text fields (like 'mini biography')."""
    personID = self._get_real_personID(self._normalize_personID(personID))
    person = Person.Person(personID=personID,
                           accessSystem=self.accessSystem)
    textFunct = modFunct or self._defModFunct
    if textFunct is not None:
        person.set_mod_funct(textFunct)
    self.update(person, info)
    return person
def _search_person(self, name, results):
    """Return a list of (personID, {personData}) tuples.

    This is only a stub: the real implementations live in the
    subclasses, somewhere under the imdb.parser package."""
    raise NotImplementedError('override this method')
def search_person(self, name, results=None):
    """Return a list of Person objects for a query for the given name.

    The results argument is the maximum number of results to return;
    when it is None, self._results is used, and a bad value falls
    back to 20."""
    if results is None:
        results = self._results
    try:
        results = int(results)
    except (ValueError, OverflowError):
        results = 20
    if not isinstance(name, unicode):
        name = unicode(name, encoding, 'replace')
    found = self._search_person(name, results)
    return [Person.Person(personID=self._get_real_personID(pID),
                          data=pData, modFunct=self._defModFunct,
                          accessSystem=self.accessSystem)
            for pID, pData in found][:results]
def get_character(self, characterID, info=Character.Character.default_info,
                  modFunct=None):
    """Return a Character object for the given characterID.

    The characterID is whatever univocally identifies a character
    for the current data access system (the imdbID used by the IMDb
    web server, a file pointer, a line number in a file, an ID in a
    database, ...).
    info is the list of sets of information to retrieve.
    If specified, modFunct will be the function used by the Character
    object when accessing its text fields (like 'biography')."""
    characterID = self._get_real_characterID(
                        self._normalize_characterID(characterID))
    character = Character.Character(characterID=characterID,
                                    accessSystem=self.accessSystem)
    textFunct = modFunct or self._defModFunct
    if textFunct is not None:
        character.set_mod_funct(textFunct)
    self.update(character, info)
    return character
def _search_character(self, name, results):
    """Return a list of (characterID, {characterData}) tuples.

    This is only a stub: the real implementations live in the
    subclasses, somewhere under the imdb.parser package."""
    raise NotImplementedError('override this method')
def search_character(self, name, results=None):
    """Return a list of Character objects for a query for the given name.

    The results argument is the maximum number of results to return;
    when it is None, self._results is used, and a bad value falls
    back to 20."""
    if results is None:
        results = self._results
    try:
        results = int(results)
    except (ValueError, OverflowError):
        results = 20
    if not isinstance(name, unicode):
        name = unicode(name, encoding, 'replace')
    found = self._search_character(name, results)
    return [Character.Character(characterID=self._get_real_characterID(cID),
                                data=cData, modFunct=self._defModFunct,
                                accessSystem=self.accessSystem)
            for cID, cData in found][:results]
def get_company(self, companyID, info=Company.Company.default_info,
                modFunct=None):
    """Return a Company object for the given companyID.

    The companyID is whatever univocally identifies a company for
    the current data access system (the imdbID used by the IMDb web
    server, a file pointer, a line number in a file, an ID in a
    database, ...).
    info is the list of sets of information to retrieve.
    If specified, modFunct will be the function used by the Company
    object when accessing its text fields (none, so far)."""
    companyID = self._get_real_companyID(
                        self._normalize_companyID(companyID))
    company = Company.Company(companyID=companyID,
                              accessSystem=self.accessSystem)
    textFunct = modFunct or self._defModFunct
    if textFunct is not None:
        company.set_mod_funct(textFunct)
    self.update(company, info)
    return company
def _search_company(self, name, results):
    """Return a list of (companyID, {companyData}) tuples.

    This is only a stub: the real implementations live in the
    subclasses, somewhere under the imdb.parser package."""
    raise NotImplementedError('override this method')
def search_company(self, name, results=None):
    """Return a list of Company objects for a query for the given name.

    The results argument is the maximum number of results to return;
    when it is None, self._results is used, and a bad value falls
    back to 20."""
    if results is None:
        results = self._results
    try:
        results = int(results)
    except (ValueError, OverflowError):
        results = 20
    if not isinstance(name, unicode):
        name = unicode(name, encoding, 'replace')
    found = self._search_company(name, results)
    return [Company.Company(companyID=self._get_real_companyID(cID),
                            data=cData, modFunct=self._defModFunct,
                            accessSystem=self.accessSystem)
            for cID, cData in found][:results]
def _search_keyword(self, keyword, results):
    """Return a list of 'keyword' strings.

    This is only a stub: the real implementations live in the
    subclasses, somewhere under the imdb.parser package."""
    raise NotImplementedError('override this method')
def search_keyword(self, keyword, results=None):
    """Search for existing keywords, similar to the given one.

    When results is None, self._keywordsResults is used; a value
    that cannot be converted to int falls back to 100."""
    if results is None:
        results = self._keywordsResults
    try:
        maxHits = int(results)
    except (ValueError, OverflowError):
        maxHits = 100
    if not isinstance(keyword, unicode):
        keyword = unicode(keyword, encoding, 'replace')
    return self._search_keyword(keyword, maxHits)
def _get_keyword(self, keyword, results):
    """Return a list of (movieID, {movieData}) tuples.

    This is only a stub: the real implementations live in the
    subclasses, somewhere under the imdb.parser package."""
    raise NotImplementedError('override this method')
def get_keyword(self, keyword, results=None):
    """Return a list of movies for the given keyword.

    When results is None, self._keywordsResults is used; a value
    that cannot be converted to int falls back to 100."""
    if results is None:
        results = self._keywordsResults
    try:
        maxHits = int(results)
    except (ValueError, OverflowError):
        maxHits = 100
    # XXX: I suppose it will be much safer if the user provides
    # an unicode string... this is just a guess.
    if not isinstance(keyword, unicode):
        keyword = unicode(keyword, encoding, 'replace')
    found = self._get_keyword(keyword, maxHits)
    return [Movie.Movie(movieID=self._get_real_movieID(mID),
                        data=mData, modFunct=self._defModFunct,
                        accessSystem=self.accessSystem)
            for mID, mData in found][:maxHits]
def _get_top_bottom_movies(self, kind):
    """Return the list of the top 250 or bottom 100 movies, as
    (movieID, {movieData}) tuples; kind can be 'top' or 'bottom'.

    This is only a stub: the real implementations live in the
    subclasses, somewhere under the imdb.parser package."""
    raise NotImplementedError('override this method')
def get_top250_movies(self):
    """Return the list of the top 250 movies."""
    charts = self._get_top_bottom_movies('top')
    return [Movie.Movie(movieID=self._get_real_movieID(mID),
                        data=mData, modFunct=self._defModFunct,
                        accessSystem=self.accessSystem)
            for mID, mData in charts]
def get_bottom100_movies(self):
    """Return the list of the bottom 100 movies."""
    charts = self._get_top_bottom_movies('bottom')
    return [Movie.Movie(movieID=self._get_real_movieID(mID),
                        data=mData, modFunct=self._defModFunct,
                        accessSystem=self.accessSystem)
            for mID, mData in charts]
def new_movie(self, *arguments, **keywords):
    """Return a Movie object.

    A title given as a plain string (via the 'title' keyword or as
    the second positional argument) is decoded to unicode."""
    # XXX: not really useful...
    if 'title' in keywords:
        if not isinstance(keywords['title'], unicode):
            keywords['title'] = unicode(keywords['title'],
                                        encoding, 'replace')
    elif len(arguments) > 1:
        if not isinstance(arguments[1], unicode):
            # Bug fix: 'arguments' is a tuple (built by *arguments),
            # so assigning into it would raise a TypeError; convert
            # it to a list first.
            arguments = list(arguments)
            arguments[1] = unicode(arguments[1], encoding, 'replace')
    return Movie.Movie(accessSystem=self.accessSystem,
                       *arguments, **keywords)
def new_person(self, *arguments, **keywords):
    """Return a Person object.

    A name given as a plain string (via the 'name' keyword or as
    the second positional argument) is decoded to unicode."""
    # XXX: not really useful...
    if 'name' in keywords:
        if not isinstance(keywords['name'], unicode):
            keywords['name'] = unicode(keywords['name'],
                                       encoding, 'replace')
    elif len(arguments) > 1:
        if not isinstance(arguments[1], unicode):
            # Bug fix: 'arguments' is a tuple (built by *arguments),
            # so assigning into it would raise a TypeError; convert
            # it to a list first.
            arguments = list(arguments)
            arguments[1] = unicode(arguments[1], encoding, 'replace')
    return Person.Person(accessSystem=self.accessSystem,
                         *arguments, **keywords)
def new_character(self, *arguments, **keywords):
    """Return a Character object.

    A name given as a plain string (via the 'name' keyword or as
    the second positional argument) is decoded to unicode."""
    # XXX: not really useful...
    if 'name' in keywords:
        if not isinstance(keywords['name'], unicode):
            keywords['name'] = unicode(keywords['name'],
                                       encoding, 'replace')
    elif len(arguments) > 1:
        if not isinstance(arguments[1], unicode):
            # Bug fix: 'arguments' is a tuple (built by *arguments),
            # so assigning into it would raise a TypeError; convert
            # it to a list first.
            arguments = list(arguments)
            arguments[1] = unicode(arguments[1], encoding, 'replace')
    return Character.Character(accessSystem=self.accessSystem,
                               *arguments, **keywords)
def new_company(self, *arguments, **keywords):
    """Return a Company object.

    A name given as a plain string (via the 'name' keyword or as
    the second positional argument) is decoded to unicode."""
    # XXX: not really useful...
    if 'name' in keywords:
        if not isinstance(keywords['name'], unicode):
            keywords['name'] = unicode(keywords['name'],
                                       encoding, 'replace')
    elif len(arguments) > 1:
        if not isinstance(arguments[1], unicode):
            # Bug fix: 'arguments' is a tuple (built by *arguments),
            # so assigning into it would raise a TypeError; convert
            # it to a list first.
            arguments = list(arguments)
            arguments[1] = unicode(arguments[1], encoding, 'replace')
    return Company.Company(accessSystem=self.accessSystem,
                           *arguments, **keywords)
def update(self, mop, info=None, override=0):
    """Given a Movie, Person, Character or Company object with only
    partial information, retrieve the required set of information.

    info is the list of sets of information to retrieve (a single
    string, or the string 'all', are accepted too).
    If override is set, the information are retrieved and updated
    even if they're already in the object."""
    # XXX: should this be a method of the Movie/Person/Character/Company
    #      classes?  NO!  What for instances created by external functions?
    # Dispatch on the object's concrete type: mopID is its native ID,
    # prefix selects the get_<prefix>_<infoset> accessor methods.
    mopID = None
    prefix = ''
    if isinstance(mop, Movie.Movie):
        mopID = mop.movieID
        prefix = 'movie'
    elif isinstance(mop, Person.Person):
        mopID = mop.personID
        prefix = 'person'
    elif isinstance(mop, Character.Character):
        mopID = mop.characterID
        prefix = 'character'
    elif isinstance(mop, Company.Company):
        mopID = mop.companyID
        prefix = 'company'
    else:
        raise IMDbError, 'object ' + repr(mop) + \
                    ' is not a Movie, Person, Character or Company instance'
    if mopID is None:
        # XXX: enough?  It's obvious that there are Characters
        #      objects without characterID, so I think they should
        #      just do nothing, when an i.update(character) is tried.
        if prefix == 'character':
            return
        raise IMDbDataAccessError, \
                'the supplied object has null movieID, personID or companyID'
    # Fetch through the object's own access system, which may differ
    # from this one.
    if mop.accessSystem == self.accessSystem:
        aSystem = self
    else:
        aSystem = IMDb(mop.accessSystem)
    if info is None:
        info = mop.default_info
    elif info == 'all':
        if isinstance(mop, Movie.Movie):
            info = self.get_movie_infoset()
        elif isinstance(mop, Person.Person):
            info = self.get_person_infoset()
        elif isinstance(mop, Character.Character):
            info = self.get_character_infoset()
        else:
            info = self.get_company_infoset()
    if not isinstance(info, (tuple, list)):
        info = (info,)
    # Accumulate the data of every requested info set, then store it
    # in the object with a single set_data call at the end.
    res = {}
    for i in info:
        # Skip info sets already present, unless override is true.
        if i in mop.current_info and not override:
            continue
        if not i:
            continue
        self._imdb_logger.debug('retrieving "%s" info set', i)
        try:
            # E.g. 'plot' for a movie resolves to get_movie_plot.
            method = getattr(aSystem, 'get_%s_%s' %
                                (prefix, i.replace(' ', '_')))
        except AttributeError:
            self._imdb_logger.error('unknown information set "%s"', i)
            # Keeps going.
            method = lambda *x: {}
        try:
            ret = method(mopID)
        except Exception, e:
            self._imdb_logger.critical('caught an exception retrieving ' \
                            'or parsing "%s" info set for mopID ' \
                            '"%s" (accessSystem: %s)',
                            i, mopID, mop.accessSystem, exc_info=True)
            ret = {}
        keys = None
        if 'data' in ret:
            res.update(ret['data'])
            if isinstance(ret['data'], dict):
                keys = ret['data'].keys()
        # A single fetch can satisfy multiple info sets; record them all.
        if 'info sets' in ret:
            for ri in ret['info sets']:
                mop.add_to_current_info(ri, keys, mainInfoset=i)
        else:
            mop.add_to_current_info(i, keys)
        if 'titlesRefs' in ret:
            mop.update_titlesRefs(ret['titlesRefs'])
        if 'namesRefs' in ret:
            mop.update_namesRefs(ret['namesRefs'])
        if 'charactersRefs' in ret:
            mop.update_charactersRefs(ret['charactersRefs'])
    mop.set_data(res, override=0)
def get_imdbMovieID(self, movieID):
    """Translate a movieID in an imdbID (the ID used by the IMDb
    web server); this is only a stub, to be overridden by the
    subclasses under the imdb.parser package."""
    raise NotImplementedError('override this method')
def get_imdbPersonID(self, personID):
    """Translate a personID in a imdbID (the ID used by the IMDb
    web server); this is only a stub, to be overridden by the
    subclasses under the imdb.parser package."""
    raise NotImplementedError('override this method')
def get_imdbCharacterID(self, characterID):
    """Translate a characterID in a imdbID (the ID used by the IMDb
    web server); this is only a stub, to be overridden by the
    subclasses under the imdb.parser package."""
    raise NotImplementedError('override this method')
def get_imdbCompanyID(self, companyID):
    """Translate a companyID in a imdbID (the ID used by the IMDb
    web server); this is only a stub, to be overridden by the
    subclasses under the imdb.parser package."""
    raise NotImplementedError('override this method')
def _searchIMDb(self, kind, ton):
    """Search the IMDb akas server for the given title or name.

    kind selects the kind of search: 'tt' (title), 'nm' (person
    name), 'char' (character name) or 'co' (company name).
    Return the imdbID of an exact match, or None."""
    # The Exact Primary search system has gone AWOL, so we resort
    # to the mobile search. :-/
    if not ton:
        return None
    aSystem = IMDb('mobile')
    if kind == 'tt':
        searchFunct = aSystem.search_movie
        check = 'long imdb canonical title'
    elif kind == 'nm':
        searchFunct = aSystem.search_person
        check = 'long imdb canonical name'
    elif kind == 'char':
        searchFunct = aSystem.search_character
        check = 'long imdb canonical name'
    elif kind == 'co':
        # XXX: are [COUNTRY] codes included in the results?
        searchFunct = aSystem.search_company
        check = 'long imdb name'
    else:
        # Bug fix: an unknown kind used to fall through and raise a
        # NameError on 'searchFunct'; fail gracefully instead.
        return None
    try:
        searchRes = searchFunct(ton)
    except IMDbError:
        return None
    # When only one result is returned, assume it was from an
    # exact match.
    if len(searchRes) == 1:
        return searchRes[0].getID()
    for item in searchRes:
        # Return the first perfect match.
        if item[check] == ton:
            return item.getID()
    return None
def title2imdbID(self, title):
    """Translate a movie title (in the plain text data files format)
    to an imdbID; None is returned when the lookup fails."""
    # 'tt' selects a title search.
    return self._searchIMDb('tt', title)
def name2imdbID(self, name):
    """Translate a person name in an imdbID.
    Try an Exact Primary Name search on IMDb;
    return None if it's unable to get the imdbID."""
    # Bug fix: a person lookup must use the 'nm' kind; the previous
    # 'tt' argument ran a *title* search, which could never match
    # against 'long imdb canonical name'.
    return self._searchIMDb('nm', name)
def character2imdbID(self, name):
    """Translate a character name in an imdbID; None is returned
    when the lookup fails."""
    # 'char' selects a character search.
    return self._searchIMDb('char', name)
def company2imdbID(self, name):
    """Translate a company name in an imdbID; None is returned
    when the lookup fails."""
    # 'co' selects a company search.
    return self._searchIMDb('co', name)
def get_imdbID(self, mop):
"""Return the imdbID for the given Movie, Person, Character or Company
object."""
imdbID = None
if mop.accessSystem == self.accessSystem:
aSystem = self
else:
aSystem = IMDb(mop.accessSystem)
if isinstance(mop, Movie.Movie):
if mop.movieID is not None:
imdbID = aSystem.get_imdbMovieID(mop.movieID)
else:
imdbID = aSystem.title2imdbID(build_title(mop, canonical=0,
ptdf=1))
elif isinstance(mop, Person.Person):
if mop.personID is not None:
imdbID = aSystem.get_imdbPersonID(mop.personID)
else:
imdbID = aSystem.name2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Character.Character):
if mop.characterID is not None:
imdbID = aSystem.get_imdbCharacterID(mop.characterID)
else:
# canonical=0 ?
imdbID = aSystem.character2imdbID(build_name(mop, canonical=1))
elif isinstance(mop, Company.Company):
if mop.companyID is not None:
imdbID = aSystem.get_imdbCompanyID(mop.companyID)
else:
imdbID = aSystem.company2imdbID(build_company_name(mop))
else:
raise IMDbError, 'object ' + repr(mop) + \
' is not a Movie, Person or Character instance'
return imdbID
def get_imdbURL(self, mop):
"""Return the main IMDb URL for the given Movie, Person,
Character or Company object, or None if unable to get it."""
imdbID = self.get_imdbID(mop)
if imdbID is None:
return None
if isinstance(mop, Movie.Movie):
url_firstPart = imdbURL_movie_main
elif isinstance(mop, Person.Person):
url_firstPart = imdbURL_person_main
elif isinstance(mop, Character.Character):
url_firstPart = imdbURL_character_main
elif isinstance(mop, Company.Company):
url_firstPart = imdbURL_company_main
else:
raise IMDbError, 'object ' + repr(mop) + \
' is not a Movie, Person, Character or Company instance'
return url_firstPart % imdbID
def get_special_methods(self):
    """Return the special methods defined by the subclass, as a
    {methodName: docstring} dictionary.

    Methods inherited from IMDbBase, private names and the
    per-infoset get_{movie,person,company,character}_* accessors
    are excluded."""
    baseMethods = set()
    for name in dir(IMDbBase):
        if isinstance(getattr(IMDbBase, name), MethodType):
            baseMethods.add(name)
    sm_dict = {}
    for name in dir(self.__class__):
        if name.startswith('_') or name in baseMethods:
            continue
        if name.startswith(('get_movie_', 'get_person_',
                            'get_company_', 'get_character_')):
            continue
        member = getattr(self.__class__, name)
        if isinstance(member, MethodType):
            sm_dict[name] = member.__doc__
    return sm_dict
| Python |
"""
company module (imdb package).
This module provides the company class, used to store information about
a given company.
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_company_name, build_company_name, \
flatten, _Container, cmpCompanies
class Company(_Container):
    """A company.

    Every information about a company can be accessed as:
        companyObject['information']
    to get a list of the kind of information stored in a
    company object, use the keys() method; some useful aliases
    are defined (as "also known as" for the "akas" key);
    see the keys_alias dictionary.
    """
    # The default sets of information retrieved.
    default_info = ('main',)
    # Aliases for some not-so-intuitive keys.
    keys_alias = {
        'distributor': 'distributors',
        'special effects company': 'special effects companies',
        'other company': 'miscellaneous companies',
        'miscellaneous company': 'miscellaneous companies',
        'other companies': 'miscellaneous companies',
        'misc companies': 'miscellaneous companies',
        'misc company': 'miscellaneous companies',
        'production company': 'production companies'}
    # No text fields to be processed by the modFunct function.
    keys_tomodify_list = ()
    # NOTE(review): presumably the comparison hook used by the
    # _Container base class (see imdb.utils) — confirm there.
    cmpFunct = cmpCompanies

    def _init(self, **kwds):
        """Initialize a company object.

        *companyID* -- the unique identifier for the company.
        *name* -- the name of the company, if not in the data dictionary.
        *myName* -- the nickname you use for this company.
        *myID* -- your personal id for this company.
        *data* -- a dictionary used to initialize the object.
        *notes* -- notes about the given company.
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to companies.
        *modFunct* -- function called returning text fields.
        """
        name = kwds.get('name')
        # An explicit 'name' is used only when 'data' did not
        # already provide one.
        if name and not self.data.has_key('name'):
            self.set_name(name)
        self.companyID = kwds.get('companyID', None)
        self.myName = kwds.get('myName', u'')

    def _reset(self):
        """Reset the company object."""
        self.companyID = None
        self.myName = u''

    def set_name(self, name):
        """Set the name of the company."""
        # XXX: convert name to unicode, if it's a plain string?
        # Company diverges a bit from other classes, being able
        # to directly handle its "notes". AND THAT'S PROBABLY A BAD IDEA!
        oname = name = name.strip()
        notes = u''
        # A trailing parenthesized section (e.g. a country code) is
        # split off the name and kept as notes.
        if name.endswith(')'):
            fparidx = name.find('(')
            if fparidx != -1:
                notes = name[fparidx:]
                name = name[:fparidx].rstrip()
        # If notes were already set on the object, keep the whole
        # original string as the name.
        if self.notes:
            name = oname
        d = analyze_company_name(name)
        self.data.update(d)
        if notes and not self.notes:
            self.notes = notes

    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        # 'long imdb name' is synthesized on the fly (see _getitem).
        if self.data.has_key('name'):
            return ['long imdb name']
        return []

    def _getitem(self, key):
        """Handle special keys."""
        ## XXX: can a company have an imdbIndex?
        if self.data.has_key('name'):
            if key == 'long imdb name':
                return build_company_name(self.data)
        # None means "not a special key": the base class continues
        # its own lookup.
        return None

    def getID(self):
        """Return the companyID."""
        return self.companyID

    def __nonzero__(self):
        """The company is "false" if the self.data does not contain a name."""
        # XXX: check the name and the companyID?
        if self.data.get('name'): return 1
        return 0

    def __contains__(self, item):
        """Return true if this company and the given Movie are related."""
        from Movie import Movie
        if isinstance(item, Movie):
            for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
                if item.isSame(m):
                    return 1
        return 0

    def isSameName(self, other):
        """Return true if two company have the same name
        and/or companyID."""
        if not isinstance(other, self.__class__):
            return 0
        if self.data.has_key('name') and \
                other.data.has_key('name') and \
                build_company_name(self.data) == \
                build_company_name(other.data):
            return 1
        if self.accessSystem == other.accessSystem and \
                self.companyID is not None and \
                self.companyID == other.companyID:
            return 1
        return 0
    # Alias: for companies the name comparison is the identity test.
    isSameCompany = isSameName

    def __deepcopy__(self, memo):
        """Return a deep copy of a company instance."""
        c = Company(name=u'', companyID=self.companyID,
                    myName=self.myName, myID=self.myID,
                    data=deepcopy(self.data, memo),
                    notes=self.notes, accessSystem=self.accessSystem,
                    titlesRefs=deepcopy(self.titlesRefs, memo),
                    namesRefs=deepcopy(self.namesRefs, memo),
                    charactersRefs=deepcopy(self.charactersRefs, memo))
        c.current_info = list(self.current_info)
        c.set_mod_funct(self.modFunct)
        return c

    def __repr__(self):
        """String representation of a Company object."""
        r = '<Company id:%s[%s] name:_%s_>' % (self.companyID,
                                               self.accessSystem,
                                               self.get('long imdb name'))
        # __repr__ must return a str, not unicode.
        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
        return r

    def __str__(self):
        """Simply print the short name."""
        return self.get('name', u'').encode('utf_8', 'replace')

    def __unicode__(self):
        """Simply print the short title."""
        return self.get('name', u'')

    def summary(self):
        """Return a string with a pretty-printed summary for the company."""
        if not self: return u''
        s = u'Company\n=======\nName: %s\n' % \
                self.get('name', u'')
        for k in ('distributor', 'production company', 'miscellaneous company',
                  'special effects company'):
            # At most the first five movies for each relation kind.
            d = self.get(k, [])[:5]
            if not d: continue
            s += u'Last movies from this company (%s): %s.\n' % \
                    (k, u'; '.join([x.get('long imdb title', u'') for x in d]))
        return s
| Python |
#!/usr/bin/env python
import os
import sys
import ez_setup
ez_setup.use_setuptools()
import setuptools
# version of the software; in the code repository this represents
# the _next_ release. setuptools will automatically add 'dev-rREVISION'.
version = '4.7'
# Project home page, also interpolated into the long description below.
home_page = 'http://imdbpy.sf.net/'
long_desc = """IMDbPY is a Python package useful to retrieve and
manage the data of the IMDb movie database about movies, people,
characters and companies.
Platform-independent and written in pure Python (and few C lines),
it can retrieve data from both the IMDb's web server and a local copy
of the whole database.
IMDbPY package can be very easily used by programmers and developers
to provide access to the IMDb's data to their programs.
Some simple example scripts - useful for the end users - are included
in this package; other IMDbPY-based programs are available at the
home page: %s
""" % home_page
dwnl_url = 'http://imdbpy.sf.net/?page=download'
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Environment :: Web Environment
Environment :: Handhelds/PDA's
Intended Audience :: Developers
Intended Audience :: End Users/Desktop
License :: OSI Approved :: GNU General Public License (GPL)
Natural Language :: English
Natural Language :: Italian
Natural Language :: Turkish
Programming Language :: Python
Programming Language :: C
Operating System :: OS Independent
Topic :: Database :: Front-Ends
Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries
Topic :: Software Development :: Libraries :: Python Modules
"""
# PyPI search keywords.
keywords = ['imdb', 'movie', 'people', 'database', 'cinema', 'film', 'person',
            'cast', 'actor', 'actress', 'director', 'sql', 'character',
            'company', 'package', 'plain text data files',
            'keywords', 'top250', 'bottom100', 'xml']
# C helper module used by the imdb.parser.sql backend.
cutils = setuptools.Extension('imdb.parser.sql.cutils',
                              ['imdb/parser/sql/cutils.c'])
# Example/end-user scripts installed from ./bin.
scripts = ['./bin/get_first_movie.py',
           './bin/get_movie.py', './bin/search_movie.py',
           './bin/get_first_person.py', './bin/get_person.py',
           './bin/search_person.py', './bin/get_character.py',
           './bin/get_first_character.py', './bin/get_company.py',
           './bin/search_character.py', './bin/search_company.py',
           './bin/get_first_company.py', './bin/get_keyword.py',
           './bin/search_keyword.py', './bin/get_top_bottom_movies.py']
# XXX: I'm not sure that 'etc' is a good idea. Making it an absolute
# path seems a recipe for a disaster (with bdist_egg, at least).
data_files = [('doc', setuptools.findall('docs')), ('etc', ['docs/imdbpy.cfg'])]
# Defining these 'features', it's possible to run commands like:
#   python ./setup.py --without-sql bdist
# having (in this example) imdb.parser.sql removed.
featCutils = setuptools.dist.Feature('compile the C module', standard=True,
                                     ext_modules=[cutils])
featLxml = setuptools.dist.Feature('add lxml dependency', standard=True,
                                   install_requires=['lxml'])
# XXX: it seems there's no way to specify that we need EITHER
#      SQLObject OR SQLAlchemy.
featSQLObject = setuptools.dist.Feature('add SQLObject dependency',
                standard=True, install_requires=['SQLObject'],
                require_features='sql')
featSQLAlchemy = setuptools.dist.Feature('add SQLAlchemy dependency',
                standard=True, install_requires=['SQLAlchemy', 'sqlalchemy-migrate'],
                require_features='sql')
sqlScripts = ['./bin/imdbpy2sql.py']
# standard=False so that it's not installed if both --without-sqlobject
# and --without-sqlalchemy are specified.
featSQL = setuptools.dist.Feature('access to SQL databases', standard=False,
                                  remove='imdb.parser.sql', scripts=sqlScripts)
# Mapping from command-line feature name (--with-X/--without-X) to Feature.
features = {
    'cutils': featCutils,
    'sql': featSQL,
    'lxml': featLxml,
    'sqlobject': featSQLObject,
    'sqlalchemy': featSQLAlchemy
}
# Keyword arguments for the setuptools.setup() call at the bottom.
params = {
    # Meta-information.
    'name': 'IMDbPY',
    'version': version,
    'description': 'Python package to access the IMDb\'s database',
    'long_description': long_desc,
    'author': 'Davide Alberani',
    'author_email': 'da@erlug.linux.it',
    'contact': 'IMDbPY-devel mailing list',
    'contact_email': 'imdbpy-devel@lists.sourceforge.net',
    'maintainer': 'Davide Alberani',
    'maintainer_email': 'da@erlug.linux.it',
    'license': 'GPL',
    'platforms': 'any',
    'keywords': keywords,
    'classifiers': filter(None, classifiers.split("\n")),
    'zip_safe': False, # XXX: I guess, at least...
    # Download URLs.
    'url': home_page,
    'download_url': dwnl_url,
    # Scripts.
    'scripts': scripts,
    # Documentation files.
    'data_files': data_files,
    # C extensions.
    #'ext_modules': [cutils],
    # Requirements. XXX: maybe we can use extras_require?
    #'install_requires': install_requires,
    #'extras_require': extras_require,
    'features': features,
    # Packages.
    'packages': setuptools.find_packages()
}
ERR_MSG = """
====================================================================
ERROR
=====
Aaargh! An error! An error!
Curse my metal body, I wasn't fast enough. It's all my fault!
Anyway, if you were trying to build a package or install IMDbPY to your
system, looks like we're unable to fetch or install some dependencies,
or to compile the C module.
The best solution is to resolve these dependencies (maybe you're
not connected to Internet?) and/or install a C compiler.
You may, however, go on without some optional pieces of IMDbPY;
try re-running this script with the corresponding optional argument:
--without-lxml exclude lxml (speeds up 'http')
--without-cutils don't compile the C module (speeds up 'sql')
--without-sqlobject exclude SQLObject (you need at least one of)
--without-sqlalchemy exclude SQLAlchemy (SQLObject or SQLAlchemy,)
(if you want to access a )
(local SQL database )
--without-sql no access to SQL databases (implied if both
--without-sqlobject and --without-sqlalchemy
are used)
Example:
python ./setup.py --without-lxml --without-sql install
The caught exception, is re-raise below:
"""
# Location and module name of the locale-rebuilding helper script.
REBUILDMO_DIR = os.path.join('imdb', 'locale')
REBUILDMO_NAME = 'rebuildmo'
def runRebuildmo():
"""Call the function to rebuild the locales."""
cwd = os.getcwd()
import sys
path = list(sys.path)
languages = []
try:
import imp
scriptPath = os.path.dirname(__file__)
modulePath = os.path.join(cwd, scriptPath, REBUILDMO_DIR)
sys.path += [modulePath, '.', cwd]
modInfo = imp.find_module(REBUILDMO_NAME, [modulePath, '.', cwd])
rebuildmo = imp.load_module('rebuildmo', *modInfo)
os.chdir(modulePath)
languages = rebuildmo.rebuildmo()
print 'Created locale for: %s.' % ' '.join(languages)
except Exception, e:
print 'ERROR: unable to rebuild .mo files; caught exception %s' % e
sys.path = path
os.chdir(cwd)
return languages
def hasCommand():
    """Return true if at least one command is found on the command line."""
    cl_args = sys.argv[1:]
    # A help request means no command will actually be run.
    if '--help' in cl_args or '-h' in cl_args:
        return False
    # A command is any non-empty argument not starting with '-'.
    return any(a and not a.startswith('-') for a in cl_args)
# Rebuild the .mo locale catalogs only when a real command (e.g.
# 'install', 'bdist') was given, then run the actual setup.
try:
    if hasCommand():
        languages = runRebuildmo()
    else:
        languages = []
    if languages:
        # Ship the .pot template along with the generated catalogs.
        data_files.append(('imdb/locale', ['imdb/locale/imdbpy.pot']))
    for lang in languages:
        files_found = setuptools.findall('imdb/locale/%s' % lang)
        if not files_found:
            continue
        base_dir = os.path.dirname(files_found[0])
        data_files.append(('imdb/locale', ['imdb/locale/imdbpy-%s.po' % lang]))
        if not base_dir:
            continue
        data_files.append((base_dir, files_found))
    setuptools.setup(**params)
except SystemExit:
    # setuptools exits on failure: print the friendly explanation,
    # then re-raise so the exit status is preserved.
    print ERR_MSG
    raise
| Python |
#!/usr/bin/env python
import os
import sys
import ez_setup
ez_setup.use_setuptools()
import setuptools
# version of the software; in the code repository this represents
# the _next_ release. setuptools will automatically add 'dev-rREVISION'.
version = '4.7'

# Project home page; also interpolated into the long description below.
home_page = 'http://imdbpy.sf.net/'

long_desc = """IMDbPY is a Python package useful to retrieve and
manage the data of the IMDb movie database about movies, people,
characters and companies.
Platform-independent and written in pure Python (and few C lines),
it can retrieve data from both the IMDb's web server and a local copy
of the whole database.
IMDbPY package can be very easily used by programmers and developers
to provide access to the IMDb's data to their programs.
Some simple example scripts - useful for the end users - are included
in this package; other IMDbPY-based programs are available at the
home page: %s
""" % home_page

dwnl_url = 'http://imdbpy.sf.net/?page=download'

# PyPI trove classifiers, one per line; empty lines are filtered out
# when building the 'classifiers' entry of the params dict below.
classifiers = """\
Development Status :: 5 - Production/Stable
Environment :: Console
Environment :: Web Environment
Environment :: Handhelds/PDA's
Intended Audience :: Developers
Intended Audience :: End Users/Desktop
License :: OSI Approved :: GNU General Public License (GPL)
Natural Language :: English
Natural Language :: Italian
Natural Language :: Turkish
Programming Language :: Python
Programming Language :: C
Operating System :: OS Independent
Topic :: Database :: Front-Ends
Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries
Topic :: Software Development :: Libraries :: Python Modules
"""

keywords = ['imdb', 'movie', 'people', 'database', 'cinema', 'film', 'person',
            'cast', 'actor', 'actress', 'director', 'sql', 'character',
            'company', 'package', 'plain text data files',
            'keywords', 'top250', 'bottom100', 'xml']

# Optional C extension used by the 'sql' data-access system.
cutils = setuptools.Extension('imdb.parser.sql.cutils',
                              ['imdb/parser/sql/cutils.c'])

# Example/utility scripts installed along with the package.
scripts = ['./bin/get_first_movie.py',
           './bin/get_movie.py', './bin/search_movie.py',
           './bin/get_first_person.py', './bin/get_person.py',
           './bin/search_person.py', './bin/get_character.py',
           './bin/get_first_character.py', './bin/get_company.py',
           './bin/search_character.py', './bin/search_company.py',
           './bin/get_first_company.py', './bin/get_keyword.py',
           './bin/search_keyword.py', './bin/get_top_bottom_movies.py']

# XXX: I'm not sure that 'etc' is a good idea. Making it an absolute
# path seems a recipe for a disaster (with bdist_egg, at least).
data_files = [('doc', setuptools.findall('docs')), ('etc', ['docs/imdbpy.cfg'])]
# Defining these 'features', it's possible to run commands like:
#   python ./setup.py --without-sql bdist
# having (in this example) imdb.parser.sql removed.
featCutils = setuptools.dist.Feature('compile the C module', standard=True,
                                     ext_modules=[cutils])

featLxml = setuptools.dist.Feature('add lxml dependency', standard=True,
                                   install_requires=['lxml'])

# XXX: it seems there's no way to specify that we need EITHER
# SQLObject OR SQLAlchemy.
featSQLObject = setuptools.dist.Feature('add SQLObject dependency',
                                        standard=True, install_requires=['SQLObject'],
                                        require_features='sql')

featSQLAlchemy = setuptools.dist.Feature('add SQLAlchemy dependency',
                                         standard=True, install_requires=['SQLAlchemy', 'sqlalchemy-migrate'],
                                         require_features='sql')

# Script(s) that only make sense when SQL access is enabled.
sqlScripts = ['./bin/imdbpy2sql.py']

# standard=False so that it's not installed if both --without-sqlobject
# and --without-sqlalchemy are specified.
featSQL = setuptools.dist.Feature('access to SQL databases', standard=False,
                                  remove='imdb.parser.sql', scripts=sqlScripts)

# Map of feature names (usable as --with-NAME / --without-NAME on the
# command line) to their Feature objects.
features = {
    'cutils': featCutils,
    'sql': featSQL,
    'lxml': featLxml,
    'sqlobject': featSQLObject,
    'sqlalchemy': featSQLAlchemy
}
# Keyword arguments for setuptools.setup(); also consumed by the
# locale-rebuild logic at the bottom of this script.
params = {
    # Meta-information.
    'name': 'IMDbPY',
    'version': version,
    'description': 'Python package to access the IMDb\'s database',
    'long_description': long_desc,
    'author': 'Davide Alberani',
    'author_email': 'da@erlug.linux.it',
    'contact': 'IMDbPY-devel mailing list',
    'contact_email': 'imdbpy-devel@lists.sourceforge.net',
    'maintainer': 'Davide Alberani',
    'maintainer_email': 'da@erlug.linux.it',
    'license': 'GPL',
    'platforms': 'any',
    'keywords': keywords,
    'classifiers': filter(None, classifiers.split("\n")),
    'zip_safe': False, # XXX: I guess, at least...
    # Download URLs.
    'url': home_page,
    'download_url': dwnl_url,
    # Scripts.
    'scripts': scripts,
    # Documentation files.
    'data_files': data_files,
    # C extensions.
    #'ext_modules': [cutils],
    # Requirements. XXX: maybe we can use extras_require?
    #'install_requires': install_requires,
    #'extras_require': extras_require,
    'features': features,
    # Packages.
    'packages': setuptools.find_packages()
}

# Friendly message printed when setup() exits with an error (e.g. a
# missing dependency or C compiler), pointing at the --without-* flags.
ERR_MSG = """
====================================================================
ERROR
=====
Aaargh! An error! An error!
Curse my metal body, I wasn't fast enough. It's all my fault!
Anyway, if you were trying to build a package or install IMDbPY to your
system, looks like we're unable to fetch or install some dependencies,
or to compile the C module.
The best solution is to resolve these dependencies (maybe you're
not connected to Internet?) and/or install a C compiler.
You may, however, go on without some optional pieces of IMDbPY;
try re-running this script with the corresponding optional argument:
--without-lxml exclude lxml (speeds up 'http')
--without-cutils don't compile the C module (speeds up 'sql')
--without-sqlobject exclude SQLObject (you need at least one of)
--without-sqlalchemy exclude SQLAlchemy (SQLObject or SQLAlchemy,)
(if you want to access a )
(local SQL database )
--without-sql no access to SQL databases (implied if both
--without-sqlobject and --without-sqlalchemy
are used)
Example:
python ./setup.py --without-lxml --without-sql install
The caught exception, is re-raise below:
"""
# Directory (relative to this script) and module name of the helper that
# regenerates the compiled .mo translation catalogs.
REBUILDMO_DIR = os.path.join('imdb', 'locale')
REBUILDMO_NAME = 'rebuildmo'

def runRebuildmo():
    """Call the function to rebuild the locales.

    Imports and runs the 'rebuildmo' helper module and returns the list
    of language codes that were rebuilt ([] on any failure).  sys.path
    and the current working directory are restored before returning.
    """
    cwd = os.getcwd()
    import sys
    path = list(sys.path)
    languages = []
    try:
        import imp
        scriptPath = os.path.dirname(__file__)
        modulePath = os.path.join(cwd, scriptPath, REBUILDMO_DIR)
        # Make the helper importable from several plausible locations.
        sys.path += [modulePath, '.', cwd]
        modInfo = imp.find_module(REBUILDMO_NAME, [modulePath, '.', cwd])
        rebuildmo = imp.load_module('rebuildmo', *modInfo)
        # The helper works relative to its own directory, so chdir first.
        os.chdir(modulePath)
        languages = rebuildmo.rebuildmo()
        print 'Created locale for: %s.' % ' '.join(languages)
    except Exception, e:
        # Best-effort: a failed locale rebuild must not abort setup.
        print 'ERROR: unable to rebuild .mo files; caught exception %s' % e
    sys.path = path
    os.chdir(cwd)
    return languages
def hasCommand():
    """Return True when sys.argv carries at least one real command.

    Only non-empty tokens that do not begin with '-' count as commands,
    and the presence of '-h' or '--help' anywhere disables detection so
    that help-only runs are treated as having no command.
    """
    tokens = sys.argv[1:]
    for stop_flag in ('--help', '-h'):
        if stop_flag in tokens:
            return False
    for tok in tokens:
        if tok and not tok.startswith('-'):
            return True
    return False
# Rebuild the locales only when a real distutils/setuptools command was
# given (not for --help runs), then hand control to setuptools.setup().
try:
    if hasCommand():
        languages = runRebuildmo()
    else:
        languages = []
    if languages:
        # Ship the translation template plus one .po catalog (and any
        # compiled files found) per rebuilt language.
        data_files.append(('imdb/locale', ['imdb/locale/imdbpy.pot']))
        for lang in languages:
            files_found = setuptools.findall('imdb/locale/%s' % lang)
            if not files_found:
                continue
            base_dir = os.path.dirname(files_found[0])
            data_files.append(('imdb/locale', ['imdb/locale/imdbpy-%s.po' % lang]))
            if not base_dir:
                continue
            data_files.append((base_dir, files_found))
    setuptools.setup(**params)
except SystemExit:
    # setup() failed (missing dependency, no C compiler, ...): print the
    # friendly hint and re-raise to preserve the exit status.
    print ERR_MSG
    raise
| Python |
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Setuptools version to bootstrap when none is installed, and the PyPI
# directory holding eggs built for the running Python version.
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]

# Known-good MD5 digests of downloadable setuptools eggs, keyed by file
# name; used by _validate_md5() to detect corrupted/tampered downloads.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
    'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
    'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
    'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
    'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
    'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
    'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
    'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Return *data* unchanged if its MD5 digest matches the registry
    entry for *egg_name*; exit with status 2 on a mismatch.

    Egg names without a registry entry are accepted without validation.
    """
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # If pkg_resources/setuptools were already imported, an in-place
    # upgrade is not possible during this run.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg and make it importable for the rest of the run.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # An older setuptools is already installed.
        if was_imported:
            print >>sys.stderr, (
                "The required version of setuptools (>=%s) is not available, and\n"
                "can't be installed while this script is running. Please install\n"
                " a more recent version first, using 'easy_install -U setuptools'."
                "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Forget the stale import so the downloaded egg wins.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Warn and pause so the user can abort before any network
                # access (or provision the file manually).
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Always release both handles, even on download/validation failure.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # setuptools is absent: bootstrap-install from a downloaded egg,
        # deleting the temporary egg afterwards.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # Placeholder version shipped by some broken distributions.
            print >>sys.stderr, (
                "You have an obsolete version of setuptools installed. Please\n"
                "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version too old: upgrade from a fresh download.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    # Hash each given egg file and record it under its base name.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    # Rewrite the md5_data literal inside this very source file.
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # 'ez_setup.py --md5update egg...' refreshes the digest registry;
    # any other invocation installs/upgrades setuptools.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
| Python |
"""
searchPerson.py

Usage: search_person "person name"

Search for the given (currently hard-coded) name and print the results.
"""
import sys

# Import the IMDbPY package.
try:
    import imdb
except ImportError:
    print 'You need to install the IMDbPY package!'
    sys.exit(1)

# NOTE(review): the query is hard-coded instead of being read from argv,
# despite what the usage line above says.
name = "arnold"

i = imdb.IMDb()

# Encodings for decoding the query and encoding the printed output.
in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()

name = unicode(name, in_encoding, 'replace')
try:
    # Do the search, and get the results (a list of Person objects).
    results = i.search_person(name)
except imdb.IMDbError, e:
    print "Probably you're not connected to Internet. Complete error report:"
    print e
    sys.exit(3)

# Print the results (with correct singular/plural for "result").
print ' %s result%s for "%s":' % (len(results),
                                  ('', 's')[len(results) != 1],
                                  name.encode(out_encoding, 'replace'))
print 'personID\t: imdbID : name'

# Print the long imdb name for every person.
for person in results:
    outp = u'%s\t: %s : %s' % (person.personID, i.get_imdbID(person),
                               person['long imdb name'])
    print outp.encode(out_encoding, 'replace')
| Python |
"""
searchMovie.py

Usage: search_movie "movie title"

Search for the given (currently hard-coded) title and print the results.
"""
import sys

# Import the IMDbPY package.
try:
    import imdb
except ImportError:
    print 'You need to install the IMDbPY package!'
    sys.exit(1)

# NOTE(review): the query is hard-coded instead of being read from argv.
title = "The Matrix"

i = imdb.IMDb()

# Encodings for decoding the query and encoding the printed output.
in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()

title = unicode(title, in_encoding, 'replace')
try:
    # Do the search, and get the results (a list of Movie objects).
    results = i.search_movie(title)
except imdb.IMDbError, e:
    print "Probably you're not connected to Internet. Complete error report:"
    print e
    sys.exit(3)

# Print the results (with correct singular/plural for "result").
print ' %s result%s for "%s":' % (len(results),
                                  ('', 's')[len(results) != 1],
                                  title.encode(out_encoding, 'replace'))
print 'movieID\t: imdbID : title'

# Print the long imdb title for every movie.
for movie in results:
    outp = u'%s\t: %s : %s' % (movie.movieID, i.get_imdbID(movie),
                               movie['long imdb title'])
    print outp.encode(out_encoding, 'replace')

####################################################################################
# Also show only those results whose title matches the query exactly.
print "here all movies that have exact such title as 'title':"
for movie in results:
    if (movie['title']==title):
        outp = u'%s\t: %s : %s' % (movie.movieID, i.get_imdbID(movie),
                                   movie['long imdb title'])
        print outp.encode(out_encoding, 'replace')
| Python |
"""
topMovies.py

Prints the IMDb top 250 movies chart, by ratings.
"""
import sys

# Import the IMDbPY package.
try:
    import imdb
except ImportError:
    print 'You need to install the IMDbPY package!'
    sys.exit(1)

def unic(string):
    """Print *string*, replacing it with a placeholder when the
    terminal encoding cannot represent it."""
    try:
        print string
    except UnicodeEncodeError:
        print 'bad movie title'

i = imdb.IMDb()
top250 = i.get_top250_movies()
print ''
print 'top 250 movies'
print 'rating\tvotes\ttitle'
for movie in top250:
    # NOTE(review): return value unused — presumably forces the title to
    # be fetched before formatting; confirm whether this call is needed.
    movie.get('title')
    unic('%s\t%s\t%s' % (movie.get('rating'), movie.get('votes'),
                         movie['long imdb title']))
| Python |
"""
getLinkToMovie.py

Usage: search_movie "movie title"

Search for the given (currently hard-coded) title and print each result
together with its IMDb URL.
"""
import sys

# Import the IMDbPY package.
try:
    import imdb
except ImportError:
    print 'You need to install the IMDbPY package!'
    sys.exit(1)

# NOTE(review): the query is hard-coded instead of being read from argv.
title = "The Terminator"

i = imdb.IMDb()

# Encodings for decoding the query and encoding the printed output.
in_encoding = sys.stdin.encoding or sys.getdefaultencoding()
out_encoding = sys.stdout.encoding or sys.getdefaultencoding()

title = unicode(title, in_encoding, 'replace')
try:
    # Do the search, and get the results (a list of Movie objects).
    results = i.search_movie(title)
except imdb.IMDbError, e:
    print "Probably you're not connected to Internet. Complete error report:"
    print e
    sys.exit(3)

# For every match, print its IDs/title and, when available, its IMDb URL.
for movie in results:
    outp = u'%s\t: %s : %s' % (movie.movieID, i.get_imdbID(movie),
                               movie['long imdb title'])
    print outp.encode(out_encoding, 'replace')
    imdbURL = i.get_imdbURL(movie)
    if imdbURL:
        print 'IMDb URL: %s' % imdbURL
| Python |
'''
Created on 21/04/2011
@author: Eran_Z
'''
| Python |
'''
Created on 29/03/2011
@author: Eran_Z
Scoring
'''
from search_m import searchSingle, searchTogether
from util_m import sum, BadMovie
#Helper functions:
def __normalizedSigmaMutualWeightHelper(w, ci, wi):
    """Weighted, normalized co-occurrence score of candidate *w* against a
    single context item *ci* carrying weight *wi*.

    Raises BadMovie when the joint hit count exceeds the single hit count
    (an inconsistent result the caller maps to a score of 0).
    """
    single_hits = searchSingle(w)
    joint_hits = searchTogether(w, ci)
    if single_hits < joint_hits:
        raise BadMovie
    return joint_hits * wi * 1.0 / single_hits
def __normalizedSigmaMutualWeight(w, context, weights):
    """Fold the per-context-item normalized scores of candidate *w* with
    the custom `sum` combiner; any BadMovie zeroes the whole candidate."""
    try:
        partials = [__normalizedSigmaMutualWeightHelper(w, context[i], weights[i])
                    for i in range(len(context))]
        return reduce(sum, partials)
    except BadMovie:
        return 0
########################################################
########################################################
#Main functions:
def normalizedMutualInformationScorer(context, weights, world):
    """Score every candidate movie in *world* against the weighted
    *context*, using the normalized mutual-weight measure."""
    return [__normalizedSigmaMutualWeight(candidate, context, weights)
            for candidate in world]
#------------- UNUSED EXTENSIONS --------------------
from search_m import searchExclusion, NGD
def __sigmaMutualWeight(w, context, weights):
    """Un-normalized weighted joint-search score of *w* over the whole
    context, folded with the custom `sum` combiner."""
    terms = [searchTogether(w, context[i]) * weights[i]
             for i in range(len(context))]
    return reduce(sum, terms)
def basicScorer(context, weights, world):
    """Mutual weight of each world movie divided by its exclusive single
    count.

    Note: uses searchExclusion, which is uncached.
    """
    scores = []
    for candidate in world:
        mutual = __sigmaMutualWeight(candidate, context, weights)
        exclusive = searchSingle(candidate) - searchExclusion(candidate, context)
        scores.append(mutual * 1.0 / exclusive)
    return scores
def NGD1Scorer(context, weights, world):
    """Inverse of the weighted sum of pairwise NGD distances
    (pairs world[i]/context[i] positionally)."""
    total = reduce(sum, [NGD(world[i], context[i]) * weights[i]
                         for i in range(len(context))])
    return 1.0 / total
def NGD2Scorer(context, weights, world):
    """Weighted sum of inverse pairwise NGD distances
    (pairs world[i]/context[i] positionally)."""
    return reduce(sum, [1.0 / (NGD(world[i], context[i]) * weights[i])
                        for i in range(len(context))])
def regularMutualInformationScorer(context, weights, world):
    """Un-normalized mutual-information score for every world movie."""
    return [__sigmaMutualWeight(candidate, context, weights)
            for candidate in world]
#-----------------------------------------
# Registry of selectable scoring strategies, keyed by their display names.
scoringAlgorithms = {"Basic": basicScorer, "NGD Type 1": NGD1Scorer, "NGD Type 2": NGD2Scorer,
                     "Regular Mutual Information": regularMutualInformationScorer,
                     "Normalized Mutual Information": normalizedMutualInformationScorer }
| Python |
#!/usr/bin/env python
# Standard Django (pre-1.4 style) project management entry point.
from django.core.management import execute_manager
import imp
try:
    # Bail out early with a helpful message when settings.py is missing.
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

import settings

if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.db import models
class Movie(models.Model):
    """A movie known to the system, uniquely identified by its title."""
    # The title doubles as the primary key, so titles must be unique.
    title = models.CharField(max_length=200, primary_key=True)
    year = models.PositiveSmallIntegerField()
    # Asymmetric self-relation; each pair carries its joint search count
    # through the SearchResult model.
    searchPair = models.ManyToManyField('self', symmetrical=False, blank=True, through='SearchResult')
    # Search-engine hit count for this title queried alone.
    searchSingle = models.BigIntegerField()
    link = models.CharField(max_length=256)

    def __unicode__(self):
        # e.g. u'The Matrix (1999)'
        return '%s (%s)' % (self.title, self.year)
class SearchResult(models.Model):
    """Joint search hit count for an ordered pair of movies; the
    through-model of Movie.searchPair."""
    movie1 = models.ForeignKey(Movie, related_name='searchresult_set1')
    movie2 = models.ForeignKey(Movie, related_name='searchresult_set2')
    # Search-engine hit count for the two titles queried together.
    numResults = models.BigIntegerField()

    def __unicode__(self):
        # Direct attribute access instead of the original, needless
        # getattr(self.movie1, 'title') indirection.
        name1 = self.movie1.title
        name2 = self.movie2.title
        return '"%s" & "%s"' % (name1, name2)
| Python |
"""
Test Movies module
"""
from django.test import TestCase
from RSM.my_friends.models import RSMUser, User
from RSM.my_friends.views import getUserNames
from RSM.my_movies.views import __updateUserMoviesList
from RSM.util import getRSMUser
from RSM.algorithm.search.makeCache import makeTestCache
# Re-bind the imported helpers under module-level names; in particular
# this gives the double-underscore view helper a non-mangled name that
# can be referenced inside the TestCase class body below.
getUserNames = getUserNames
updateUserMoviesList = __updateUserMoviesList
getRSMUser = getRSMUser
makeTestCache = makeTestCache
class MoviesTest(TestCase):
    """Integration tests for maintaining a user's Seen/Likes/Blacklist
    movie lists (my_movies app)."""
    # Shared class state: 'added' guards the one-time fixture creation,
    # 'testCount' only feeds the progress printouts.
    added = False
    testCount = 1

    def setUp(self):
        """Create the test user and populate the movie cache (once)."""
        print "\nMovies Test" + str(MoviesTest.testCount) + "\n"
        if (MoviesTest.added):
            return
        #adding a user to the system
        user = User.objects.create_user("Tomer", "t@t.com", "1234")
        user.save()
        RSMUser(user=user).save()
        #add some movies to the system
        makeTestCache()

    def tearDown(self):
        print "\nMovies Test" + str(MoviesTest.testCount) + "\n"
        MoviesTest.testCount += 1

    def test_1_addMovieToSeen(self):
        """Three movies added to 'Seen' are all stored."""
        #adding movies to the list
        selectedList = ["Mov1","Mov2","Mov3"]
        username = "Tomer"
        movieType = "Seen"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 3 movies to seen"
        currentUser = getRSMUser(username)
        #asserting addition
        self.assertEquals(len(currentUser.seen.all()),3)
        print "There are 3 movies in seen"

    def test_2_addMovieToLikes(self):
        """Movies already in 'Seen' can be added to 'Likes'."""
        #adding movies to the list
        selectedList = ["Mov1","Mov2","Mov3"]
        username = "Tomer"
        movieType = "Seen"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 3 movies to seen"
        selectedList = ["Mov3","Mov1"]
        username = "Tomer"
        movieType = "Likes"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 2 movies to likes"
        currentUser = getRSMUser(username)
        #asserting addition
        self.assertEquals(len(currentUser.likes.all()),2)
        print "There are 2 movies in likes"

    def test_3_addMovieToBlacklist(self):
        """A seen movie can be added to the blacklist."""
        #adding movies to the list
        selectedList = ["Mov1","Mov2","Mov3"]
        username = "Tomer"
        movieType = "Seen"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 3 movies to seen"
        selectedList = ["Mov2"]
        username = "Tomer"
        movieType = "Blacklist"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 1 movie to blacklist"
        currentUser = getRSMUser(username)
        #asserting addition
        self.assertEquals(len(currentUser.blacklist.all()),1)
        print "There is 1 movie in blacklist"

    def test_4_addUnSeenMovieToBlacklist(self):
        """A movie NOT in 'Seen' can still be blacklisted."""
        #adding movies to the list
        selectedList = ["Mov1","Mov2","Mov3"]
        username = "Tomer"
        movieType = "Seen"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 3 movies to seen"
        selectedList = ["Mov5"]
        username = "Tomer"
        movieType = "Blacklist"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 1 movie to blacklist, movie not in seen"
        currentUser = getRSMUser(username)
        #asserting addition
        self.assertEquals(len(currentUser.blacklist.all()),1)
        print "There is 1 movie in blacklist"

    def test_5_addUnSeenMovieToLikes(self):
        """A movie NOT in 'Seen' can still be added to 'Likes'."""
        #adding movies to the list
        selectedList = ["Mov1","Mov2","Mov3"]
        username = "Tomer"
        movieType = "Seen"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 3 movies to seen"
        selectedList = ["Mov5"]
        username = "Tomer"
        movieType = "Likes"
        updateUserMoviesList(username,selectedList,movieType)
        print "Added 1 movie to likes"
        currentUser = getRSMUser(username)
        #asserting addition
        self.assertEquals(len(currentUser.likes.all()),1)
        print "There is 1 movie in likes"
'''
Created on Apr 29, 2011
@author: shurok
'''
from django.conf.urls.defaults import patterns

# URL routes of the my_movies application (Django <=1.3 style); view
# names are resolved relative to the 'my_movies.views' module.
urlpatterns = patterns('my_movies.views',
    (r'^viewMovieTypes/viewMovies/$','viewMovies'),
    (r'^viewMovieTypes/$','viewMovieTypes'),
)
'''
Created on 21/04/2011
@author: Eran_Z
'''
from django.contrib import admin
from models import Movie, SearchResult

# Expose both models in the Django admin with the default options.
admin.site.register(Movie)
admin.site.register(SearchResult)
| Python |
from django.template import RequestContext
from django.shortcuts import render_to_response
from models import Movie
from RSM.util import getRSMUser, verifySessionValidity, printDebug, getTitle
def viewMovieTypes(request):
    """Persist the movie-list selection posted by the user, then render
    the list-type chooser page.

    When the session is not valid, verifySessionValidity() supplies the
    response to return instead.
    """
    invalid_session_response = verifySessionValidity(request)
    if invalid_session_response:
        return invalid_session_response
    __updateUserMoviesList(request.user.username,
                           request.POST.getlist(u'selectedList'),
                           request.POST.get(u'movieType'))
    return render_to_response("my_movies/movieTypes.html",
                              context_instance=RequestContext(request))
def viewMovies(request):
    """Render one of the user's movie lists.

    The POST parameter 'movieType' selects "Seen", "Likes" or
    "Blacklist".  The template receives the titles currently on that
    list plus 'allmovies', the candidates that may still be added:
    blacklisted movies are excluded from the Likes pool and liked movies
    from the Blacklist pool, so a movie can never sit on both lists.
    """
    nonValidSessionFlag = verifySessionValidity(request)
    if nonValidSessionFlag:
        return nonValidSessionFlag
    movieType = request.POST["movieType"]
    movieNames = []
    currentUser = getRSMUser(request.user.username)
    allMovies = set(Movie.objects.all())
    if movieType == "Seen":
        movieNames = [u.title for u in currentUser.seen.all()]
    elif movieType == "Likes":
        movieNames = [u.title for u in currentUser.likes.all()]
        # Blacklisted movies cannot be offered for the Likes list.
        allMovies -= set(currentUser.blacklist.all())
    elif movieType == "Blacklist":
        movieNames = [u.title for u in currentUser.blacklist.all()]
        # Liked movies cannot be offered for the Blacklist.
        allMovies -= set(currentUser.likes.all())
    else:
        # Unknown list name: log and fall through with an empty list.
        printDebug('BUG in my_movies view: viewMovies')
    variables = {"movienames": movieNames, "movietype": movieType, "allmovies": map(getTitle, allMovies)}
    return render_to_response("my_movies/viewmovies.html", variables, context_instance=RequestContext(request))
def __updateUserMoviesList(username, selectedList, movieType):
    """Replace one of the user's movie lists with the given titles.

    *movieType* selects "Seen", "Likes" or "Blacklist"; the chosen list
    is cleared and re-filled with the Movie rows named in
    *selectedList*.  An empty/None *movieType* is a no-op, and an
    unknown value is logged via printDebug and ignored.
    """
    if not movieType:
        return
    currentUser = getRSMUser(username)
    # BUGFIX: the original bound the target manager to the
    # builtin-shadowing name 'list' and, on an unknown movieType, fell
    # through to 'list.clear()' on the builtin type and crashed.  Use a
    # dispatch table and bail out after logging instead.
    targets = {"Seen": currentUser.seen,
               "Likes": currentUser.likes,
               "Blacklist": currentUser.blacklist}
    target = targets.get(movieType)
    if target is None:
        printDebug('BUG in my_movies view: __updateUserMoviesList')
        return
    target.clear()
    for title in selectedList:
        target.add(Movie.objects.get(title=title))
| Python |
#from django.db import models
#from RSM.my_movies.models import *
#from RSM.my_friends.models import *
| Python |
"""
Test recommendations module
"""
from django.test import TestCase
from RSM.my_friends.models import RSMUser, User
from RSM.my_friends.views import getUserNames
from RSM.my_movies.views import __updateUserMoviesList
from RSM.util import getRSMUser
from RSM.algorithm.search.makeCache import makeTestCache
# Module-level aliases for the imported helpers; the alias for the
# double-underscore view helper avoids Python's name mangling when it is
# used inside the TestCase class defined below.
getUserNames = getUserNames
updateUserMoviesList = __updateUserMoviesList
getRSMUser = getRSMUser
makeTestCache = makeTestCache
class RecommendationsTest(TestCase):
added = False
testCount = 1
def setUp(self):
print "\nRecommendations Test" + str(RecommendationsTest.testCount) + "\n"
if (RecommendationsTest.added):
return
#adding some users to the system
user1 = User.objects.create_user("Tomer", "t@t.com", "1234")
user1.save()
RSMUser(user=user1).save()
user2 = User.objects.create_user("Eran", "t@t.com", "1234")
user2.save()
RSMUser(user=user2).save()
user2 = User.objects.create_user("Alex", "t@t.com", "1234")
user2.save()
RSMUser(user=user2).save()
#add some movies to the system
makeTestCache()
#create test setup
#Setting movie data
selectedList = ["Mov1","Mov2"]
username = "Tomer"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
selectedList = ["Mov4"]
username = "Tomer"
movieType = "Blacklist"
updateUserMoviesList(username,selectedList,movieType)
selectedList = ["Mov2","Mov5"]
username = "Eran"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
selectedList = ["Mov5","Mov1"]
username = "Alex"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
#setting friendships
self.client.login(password=u"1234",username="Tomer")
self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
#setting groups
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"addFriend","groupname":"testGroup1"})
def tearDown(self):
print "\nRecommendations Test" + str(RecommendationsTest.testCount) + "\n"
RecommendationsTest.testCount += 1
def test_1_basicRecommend(self):
print "basic recommend: ask for recommendation to self based only on seen & black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/")
self.assertContains(response, "Mov3")
self.assertContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies were recommended"
def test_2_recommendSeen(self):
print "recommend with seen movies: ask for recommendation to self based only on black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/",{"recommendSeen":"true"})
self.assertContains(response, "Mov3")
self.assertContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertContains(response, "Mov1")
self.assertContains(response, "Mov2")
print "Only non blacklisted movies were recommended"
def test_3_recommendWithFriends(self):
print "recommend with friends: ask for recommendation to self based on self & friends seen & black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/",{"friendList":("Alex","Eran")})
self.assertContains(response, "Mov3")
self.assertNotContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies were recommended considering friends movies as well"
def test_4_recommendWithGroups(self):
print "recommend with groups: ask for recommendation to self based on self & group seen & black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/",{"groupList":"testGroup1"})
self.assertContains(response, "Mov3")
self.assertNotContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies were recommended considering group movies as well"
def test_5_recommendWithGroupsVsFriends(self):
print "comparing group and friends results"
self.client.login(password=u"1234",username="Tomer")
response1 = self.client.post("/recommendation/recommend/",{"groupList":"testGroup1"})
response2 = self.client.post("/recommendation/recommend/",{"friendList":("Alex","Eran")})
self.assertEqual(str(response1),str(response2))
print "the results are equal"
def test_6_basicRecommend(self):
print "selected recommend: ask for recommendation to self based only on seen & black list & chosen movies"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/", {"selectedList":("Mov3","Mov2")})
self.assertContains(response, "Mov3")
self.assertNotContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies from the list were recommended"
| Python |
'''
Created on 21/04/2011
@author: Eran_Z
'''
from django.conf.urls.defaults import patterns
# URL routes for the recommendation app; view callables live in recommendation.views.
urlpatterns = patterns('recommendation.views',
    (r'^$', 'chooseFriends'),        # landing page: pick friends/groups to recommend with
    (r'^recommend/$', 'recommend'),  # run the recommendation engine
)
from django.template import RequestContext
from django.shortcuts import render_to_response
from RSM.algorithm.feasibility import COR_algorithm
from RSM.my_movies.models import Movie
from RSM.util import verifySessionValidity, getRSMUser, getRSMGroup, getTitle
recommendationLimit = 5
def recommend(request):
    """Entry point for /recommendation/recommend/.

    Dispatches on the POSTed fields: 'limitWorld' shows the movie-set
    chooser, 'movieList' means we came back from the results page after
    editing checkboxes, otherwise a fresh recommendation is computed.
    Redirects to the welcome page when the session is no longer valid.
    """
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    currUser = getRSMUser(request.user.username)
    recommendSeen = bool(request.POST.get(u'recommendSeen'))
    if bool(request.POST.get(u'limitWorld')):
        # 'recommend' button clicked with the 'limit world' checkbox checked
        return __recommend_limitWorld(request, recommendSeen)
    if request.POST.get(u'movieList'):
        # redirected here after updating likes / blacklist / seen checkboxes
        return __recommend_afterUpdatingCheckboxes(request, currUser)
    # first visit to this page
    return __recommend_firstTime(request, currUser, recommendSeen)
def __recommend_limitWorld(request, recommendSeen):
    """Render the movie-set chooser, forwarding the friend/group selection so it survives the round trip."""
    template_context = {
        'friendList': request.POST.getlist(u'friendList'),
        'groupList': request.POST.getlist(u'groupList'),
        'recommendSeen': recommendSeen,
        'movies': map(getTitle, Movie.objects.all()),
    }
    return render_to_response('recommendation/chooseMovieSet.html', template_context,
                              context_instance=RequestContext(request))
def __recommend_afterUpdatingCheckboxes(request, currUser):
    """Persist the seen/like/blacklist checkbox edits made on the results
    page, then re-render the same movie list with the updated flags."""
    movieList = request.POST.getlist(u'movieList')
    __updateList(currUser.seen, request.POST.getlist(u'seen'), movieList)
    __updateList(currUser.likes, request.POST.getlist(u'like'), movieList)
    __updateList(currUser.blacklist, request.POST.getlist(u'blacklist'), movieList)
    currUser.save()
    liked = set(map(getTitle, currUser.likes.all()))
    watched = set(map(getTitle, currUser.seen.all()))
    banned = set(map(getTitle, currUser.blacklist.all()))
    movieResults = [Movie.objects.get(title=title) for title in movieList]
    rows = [(m.title, m.year, m.link, (m.title in liked), (m.title in watched), (m.title in banned))
            for m in movieResults]
    return render_to_response('recommendation/results.html', {'results': rows},
                              context_instance=RequestContext(request))
def __recommend_firstTime(request, currUser, recommendSeen):
    """Compute and render recommendations for a fresh request.

    The candidate 'world' is every movie minus seen (unless recommendSeen)
    and minus blacklisted titles, pooled over the user plus the selected
    friends and group members; liked titles form the context fed to
    COR_algorithm. An optional POSTed 'selectedList' restricts the world.
    """
    context = set(map(getTitle, currUser.likes.all()))
    seen = set(map(getTitle, currUser.seen.all()))
    blacklist = set(map(getTitle, currUser.blacklist.all()))
    # collect the selected friends plus every member of the selected groups
    friends = set(map(lambda uname: getRSMUser(uname), request.POST.getlist(u'friendList')))
    groups = map(lambda gn: getRSMGroup(currUser.user.username, gn), request.POST.getlist(u'groupList'))
    for g in groups:
        friends |= set(g.members.all())
    friendsContext = context.copy()
    # BUG FIX: friendsSeen/friendsBlacklist used to be created only when
    # recommendSeen was false, yet friendsBlacklist was read unconditionally
    # below -- a NameError whenever recommendSeen was posted. Blacklisted
    # titles must always be excluded; seen titles only when not recommendSeen.
    friendsBlacklist = blacklist.copy()
    friendsSeen = seen.copy() if not recommendSeen else set()
    for friend in friends:
        friendsContext |= set(map(getTitle, friend.likes.all()))
        friendsBlacklist |= set(map(getTitle, friend.blacklist.all()))
        if not recommendSeen:
            friendsSeen |= set(map(getTitle, friend.seen.all()))
    world = __getWorld(friendsSeen, friendsBlacklist)
    if request.POST.get(u'selectedList'):
        # after limiting the world on the chooseMovieSet page
        world &= set(request.POST.getlist(u'selectedList'))
    if not world:
        return render_to_response('recommendation/noMatch.html', context_instance=RequestContext(request))
    world = list(world)
    if not friendsContext:
        # nothing liked yet: fall back to an arbitrary slice of the world
        results = world[:recommendationLimit]
    else:
        results = COR_algorithm("Mutual Information", "Normalized Mutual Information", list(friendsContext), world)[:recommendationLimit]
    movieResults = map(lambda r: Movie.objects.get(title=r), results)
    return render_to_response('recommendation/results.html', {'results': [(m.title, m.year, m.link, (m.title in context), (m.title in seen), (m.title in blacklist))
                                                              for m in movieResults]}, context_instance=RequestContext(request))
def chooseFriends(request):
    """Landing page: let the user pick which friends/groups to recommend with."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    rsm_user = getRSMUser(request.user.username)
    page_data = {'friends': rsm_user.friends.all(), 'groups': rsm_user.groups.all()}
    return render_to_response('recommendation/chooseFriends.html', page_data,
                              context_instance=RequestContext(request))
#Private functions
def __getUnfilteredWorld():
    """Return the titles of every movie in the database (the unrestricted world)."""
    return [getTitle(movie) for movie in Movie.objects.all()]
def __getWorld(seen, blacklist):
    """Candidate titles: the whole catalogue minus seen and blacklisted sets."""
    candidates = set(__getUnfilteredWorld())
    return candidates - seen - blacklist
def __updateList(src, update, all):
    """Synchronize the m2m relation *src* for the movies listed in *all*:
    every title in *all* is first removed, then the titles in *update*
    (the checked boxes) are re-added. Titles with no matching Movie row
    are skipped.

    Note: the parameter name `all` shadows the builtin but is kept for
    backward compatibility with keyword callers.
    """
    # Previously these used bare `except: pass`, which also hid real DB
    # errors; only a missing movie is an expected, ignorable condition.
    for title in set(all):
        try:
            src.remove(Movie.objects.get(title=title))
        except Movie.DoesNotExist:
            pass
    for title in set(update):
        try:
            src.add(Movie.objects.get(title=title))
        except Movie.DoesNotExist:
            pass
| Python |
#!/usr/bin/env python
# Standard Django 1.x bootstrap script: verifies settings.py is importable
# from this directory and hands control to execute_manager.
from django.core.management import execute_manager
import imp
try:
    imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)
import settings
if __name__ == "__main__":
    execute_manager(settings)
| Python |
from django.conf.urls.defaults import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# App routes: each app ships its own urls module.
urlpatterns = patterns('',
    (r'^recommendation/', include('recommendation.urls')),
    (r'^friends/', include('my_friends.urls')),
    (r'^movies/', include('my_movies.urls')),
    (r'^groups/', include('my_groups.urls')),
    # Examples:
    # url(r'^$', 'RSM.views.home', name='home'),
    # url(r'^RSM/', include('RSM.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
)
# Serve static files through the staticfiles app (dev convenience).
urlpatterns += staticfiles_urlpatterns()
# 'main' views
urlpatterns += patterns('RSM.views',
    url(r'^$','index'),
    url(r'^login/$','custom_login'),
    url(r'^register/$','custom_register'),
    url(r'^aboutUs/$','about_us'),
    url(r'^profile/$','custom_profile'),
)
if settings.DEBUG:
    # cache creation views -- deliberately unavailable in production
    urlpatterns += patterns('',
        (r'^resetCacheFiles/$', 'RSM.algorithm.search.makeCache.createBestMoviesFiles'),
        (r'^resetCache/$', 'RSM.algorithm.search.makeCache.makeCache'),
    )
| Python |
# Django settings for RSM project.
import os
# WARNING: DEBUG must be switched off for any production deployment;
# TEMPLATE_DEBUG and DEBUG_PRINTS derive from it.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Project-specific switch consumed by RSM.util.printDebug.
DEBUG_PRINTS = DEBUG
# Absolute directory of this file, normalized to forward slashes (Windows-safe).
SITE_ROOT = os.path.dirname(os.path.realpath(__file__)).replace('\\','/')
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': os.path.join(SITE_ROOT, '../sqlite3.db'), # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Jerusalem'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(SITE_ROOT, 'templates'),
    os.path.join(SITE_ROOT, 'templates/static'),
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# SECURITY NOTE(review): this key is committed to source control; rotate it
# and load it from the environment before any real deployment.
SECRET_KEY = 'a=ec)ul(6pvifzv_h(o1h30we=b*0&+2z*48d@$qr5j)+$vdrm'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    # NOTE(review): CsrfResponseMiddleware was deprecated in later Django
    # releases in favour of the {% csrf_token %} template tag -- confirm
    # before upgrading Django.
    'django.middleware.csrf.CsrfResponseMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'RSM.urls'
TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'RSM.recommendation',
    'RSM.my_friends',
    'RSM.my_movies',
    'RSM.my_groups',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
#    'doj', #deployment
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| Python |
from django.shortcuts import redirect, get_object_or_404
from my_friends.models import RSMUser
from my_groups.models import RSMGroup
import string
from django.conf import settings
def getRSMUser(username):
    """Fetch the RSMUser profile for *username*, raising Http404 when absent."""
    return get_object_or_404(RSMUser, user__username=username)
def getRSMGroup(username, groupname):
    """Fetch the group *groupname* owned by *username*; raises Http404 when absent."""
    return get_object_or_404(RSMGroup, owner=username,groupName=groupname )
def getRSMGroup1(groupname):
    """Fetch a group by name regardless of owner; raises Http404 when absent.

    NOTE(review): group names are only unique per owner (RSMGroup.Meta's
    unique_together is ("owner", "groupName")), so this lookup can raise
    MultipleObjectsReturned -- confirm callers guarantee a unique name.
    """
    return get_object_or_404(RSMGroup,groupName=groupname )
def getUserNames(rsmUsers):
    """Map a collection of RSMUser objects to the list of their Django usernames."""
    return [rsm_user.user.username for rsm_user in rsmUsers]
def printDebug(str):
    """Print *str* only when settings.DEBUG_PRINTS is enabled.

    NOTE(review): the parameter shadows the builtin `str`; renaming it
    would be cleaner but could break keyword callers, so it is left as-is.
    """
    if settings.DEBUG_PRINTS:
        print str
def verifySessionValidity(request):
    """Return a redirect to the logged-out welcome page when the requester
    is not authenticated; return None for a valid session."""
    if request.user.is_authenticated():
        return None
    return redirect('/?logout=true&reason=expired')
def getExtend(request):
    """Pick the base template: the homepage menu for authenticated users,
    the logged-out menu otherwise."""
    if request.user.is_authenticated():
        return "basic/homepage.html"
    return "basic/loggedOutMenu.html"
def getTitle(m):
    """Return the `title` attribute of a movie-like object (used with map())."""
    return m.title
#These functions return True iff s is a string that we consider legal:
def isLegalName(s):
    """True iff *s* is printable and contains none of " < > & (single quotes allowed)."""
    return __isAcceptableName(s, '"<>&')
def isSafeName(s):
    """Stricter variant of isLegalName: additionally rejects single quotes."""
    return __isAcceptableName(s, '"\'<>&')
def __isAcceptableName(s, illegal_chars):
    """True iff every character of *s* is printable and none of them
    appears in *illegal_chars* (vacuously true for the empty string)."""
    return all((c in string.printable) and (c not in illegal_chars) for c in s)
| Python |
from django.contrib import auth
from django.contrib.auth.models import User
from django.template import RequestContext
from django.db import IntegrityError
from django.shortcuts import render_to_response, get_object_or_404, redirect
from RSM.my_friends.models import RSMUser
from RSM.util import printDebug, getExtend, verifySessionValidity, isSafeName
def index(request):
    """Site root.

    Anonymous visitors get the welcome page. Authenticated users get the
    homepage, except for two GET-triggered flows: ?change=... handles the
    password-change form POST (logging the user out afterwards) and
    ?logout=... logs out, optionally showing a reason banner looked up in
    the module-level `reasons` dict.
    """
    if request.user.is_authenticated():
        if request.GET.get("change"):
            if request.POST:
                # password-change form submission: both fields must match
                if (request.POST.get("pass") == request.POST.get("repPass")):
                    request.user.set_password(request.POST.get("pass"))
                    request.user.save()
                    # changing the password ends the session; force a re-login
                    auth.logout(request)
                    return render_to_response("basic/welcomepage.html",context_instance=RequestContext(request))
                else:
                    return render_to_response("basic/errorPage.html", {'extend': getExtend(request), 'message':reasons['passMismatch']}, context_instance=RequestContext(request))
            else:
                return redirect('/')
        if request.GET.get("logout"):
            auth.logout(request)
            variables = {}
            if request.GET.get("reason"):
                reason = request.GET.get("reason")
                # show the banner only for known reason codes
                if reasons.has_key(reason):
                    variables = {"reason": reasons[reason]}
            return render_to_response('basic/welcomepage.html', variables, context_instance=RequestContext(request))
        rsmUser = get_object_or_404(RSMUser, user__username=request.user.username)
        return render_to_response('basic/homepage.html', {"user": rsmUser}, context_instance=RequestContext(request))
    else:
        return render_to_response('basic/welcomepage.html', context_instance=RequestContext(request))
def custom_login(request):
    """Handle the login form POST.

    On a successful 'Login' action the user is authenticated, logged in
    and shown the homepage; wrong credentials render an error page. Any
    other action, or a non-POST request, redirects back to '/'.
    """
    if request.POST:
        action = request.POST.get('action')
        printDebug(action)
        if (action == 'Login'):
            username = request.POST.get('username', '')
            password = request.POST.get('password', '')
            user = auth.authenticate(username=username, password=password)
            if (user is not None and user.is_active):
                auth.login(request, user)
                rsmUser = get_object_or_404(RSMUser, user__username=request.user.username)
                return render_to_response('basic/homepage.html', {"user": rsmUser}, context_instance=RequestContext(request))
            else:
                return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['uNamePassErr']},context_instance=RequestContext(request))
        else:
            return redirect('/')
    else:
        return redirect('/')
def about_us(request):
    """Render the static About Us page with the appropriate base template."""
    page_data = {'extend': getExtend(request)}
    return render_to_response('basic/aboutUs.html', page_data,
                              context_instance=RequestContext(request))
def custom_register(request):
    """Handle the registration form.

    POST: validate the username and password pair, create the Django User
    plus its 1:1 RSMUser profile, log the new user in and show the
    homepage. GET (or fall-through) renders the registration form.

    NOTE(review): an empty password passes validation here (only the
    match with repPass is checked) -- confirm whether that is intended.
    """
    if request.POST:
        email = request.POST.get('email','')
        password = request.POST.get('pass', '')
        repeatPassword = request.POST.get('repPass', '')
        username = request.POST.get('inputUsername','')
        if not __isLegalUsername(username):
            return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['illUName']}, context_instance=RequestContext(request))
        if password != repeatPassword:
            return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['passMismatch']}, context_instance=RequestContext(request))
        try:
            # create the auth user and its RSM profile together
            user = User.objects.create_user(username, email, password)
            rsmUser = RSMUser()
            rsmUser.user=user
            rsmUser.save()
        except IntegrityError:
            # username already taken
            return render_to_response("basic/errorPage.html", {'extend': getExtend(request), 'message': reasons['userExists']}, context_instance=RequestContext(request))
        user = auth.authenticate(username=username, password=password)
        if user is not None and user.is_active:
            auth.login(request, user)
            return render_to_response('basic/homepage.html', {"username": user}, context_instance=RequestContext(request))
        else:
            return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['expired']}, context_instance=RequestContext(request))
    return render_to_response('basic/register.html', context_instance=RequestContext(request))
def custom_profile(request):
    """Render the profile page for the logged-in user (redirects on an expired session)."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    profile_owner = get_object_or_404(RSMUser, user__username=request.user.username)
    return render_to_response("basic/profile.html", {"user": profile_owner},
                              context_instance=RequestContext(request))
def __isLegalUsername(username):
    # A legal username is non-empty, passes isSafeName (printable, no
    # quote/angle-bracket/& characters) and contains neither spaces nor '%'
    # (presumably because usernames appear in URLs -- TODO confirm).
    # Note: for an empty username the short-circuit returns '' (falsy)
    # rather than False; callers only test truthiness, so keep this form.
    return username and isSafeName(username) and ' ' not in username and '%' not in username
reasons = {'expired': 'Your session has expired.', 'passMismatch': "Passwords don't match.", 'uNamePassErr': "Wrong username or password!", 'illUName': "Illegal username.", 'userExists': "Username is already taken."}
| Python |
from django.db import models
from django.contrib.auth.models import User
class RSMUser(models.Model):
    # Profile model that augments Django's auth User with the social data
    # used by the recommendation engine.
    user = models.OneToOneField(User)
    # Directed friendship: adding a friend does NOT make the relation mutual.
    friends = models.ManyToManyField('self', symmetrical=False, blank=True)
    seen = models.ManyToManyField('my_movies.Movie', related_name='viewers', blank=True)  # movies the user watched
    likes = models.ManyToManyField('my_movies.Movie', related_name='fans', blank=True)  # movies the user liked
    blacklist = models.ManyToManyField('my_movies.Movie', related_name='haters', blank=True)  # movies never to recommend
    # NOTE(review): related_name 'groupps' looks like a typo, but it is part
    # of the ORM surface -- renaming would require a migration of callers.
    groups = models.ManyToManyField('my_groups.RSMGroup', related_name='groupps', blank=True)
    def __unicode__(self):
        # Display the underlying auth username in admin/shell listings.
        return self.user.__str__()
| Python |
"""
Test Friends module
"""
from django.test import TestCase
from models import RSMUser, User
#from django.template import RequestContext
from RSM.my_friends.views import getUserNames
getUserNames = getUserNames
class FriendsTest(TestCase):
    """End-to-end tests for the my_friends app, driving the add/remove
    friendship views through Django's test client."""
    # Intended as a run-once guard for the fixtures; the line that would set
    # it to True is commented out in setUp, so users are recreated per test.
    added = False
    # Counter used only for the progress prints in setUp/tearDown.
    testCount = 1
    def setUp(self):
        """Create the three baseline users Tomer, Eran and Alex."""
        print "\nFriends Test" + str(FriendsTest.testCount) + "\n"
        if (FriendsTest.added):
            return
        #adding some users to the system
        user1 = User.objects.create_user("Tomer", "t@t.com", "1234")
        user1.save()
        RSMUser(user=user1).save()
        user2 = User.objects.create_user("Eran", "t@t.com", "1234")
        user2.save()
        RSMUser(user=user2).save()
        user3 = User.objects.create_user("Alex", "t@t.com", "1234")
        user3.save()
        RSMUser(user=user3).save()
        # FriendsTest.added = True
    def tearDown(self):
        """Log test completion and bump the shared counter."""
        print "\nEnd Friends Test" + str(FriendsTest.testCount) + "\n"
        FriendsTest.testCount += 1
    def test_1_basic(self):
        """Freshly created users exist and have no friends."""
        print "Adding users and checking initial state"
        #Checking init of test
        userNameList = getUserNames(RSMUser.objects.all())
        #Asserting users
        assert "Tomer" in userNameList
        assert "Eran" in userNameList
        assert "Alex" in userNameList
        assert "BlaBla" not in userNameList
        #Printing users
        for u in userNameList:
            print "Added RSMUser " + str(u) + " to the system"
        #Check no one has any friends
        users = RSMUser.objects.all()
        for u in (users):
            n = len(u.friends.all())
            self.assertEquals(n,0)
        #Test finished
        print "Added users to the system, none have friends"
    def test_2_addFriend(self):
        """Adding one friend affects only the acting user (friendship is directed)."""
        userNameList = getUserNames(RSMUser.objects.all())
        assert "Tomer" in userNameList
        self.client.login(password=u"1234",username="Tomer")
        print "Logged in as Tomer"
        self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
        print "Added Eran as friend"
        #Only Tomer should now have a friend
        users = RSMUser.objects.all()
        for u in (users):
            n = len(u.friends.all())
            if (u.user.username != "Tomer"):
                self.assertEquals(n,0)
            else:
                self.assertEquals(n,1)
        print "Tomer has 1 Friend, No one else has friends"
    def test_3_removeFriend(self):
        """Removing the only friend returns everyone to a friendless state."""
        userNameList = getUserNames(RSMUser.objects.all())
        assert "Tomer" in userNameList
        self.client.login(password=u"1234",username="Tomer")
        print "Logged in as Tomer"
        self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
        print "Added Eran as friend"
        self.client.post("/friends/remove/", {"friendsList":"Eran"})
        print "Removed Eran from friends"
        #Check no one has any friends
        users = RSMUser.objects.all()
        for u in (users):
            n = len(u.friends.all())
            self.assertEquals(n,0)
        print "No one has friends"
    def test_4_addMultipleFriends(self):
        """Adding two friends in one POST registers both for the acting user."""
        userNameList = getUserNames(RSMUser.objects.all())
        assert "Tomer" in userNameList
        self.client.login(password=u"1234",username="Tomer")
        print "Logged in as Tomer"
        self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
        print "Added Eran as friend"
        print "Added Alex as friend"
        #Only Tomer should have friends (two of them)
        users = RSMUser.objects.all()
        for u in (users):
            n = len(u.friends.all())
            if (u.user.username != "Tomer"):
                self.assertEquals(n,0)
            else:
                self.assertEquals(n,2)
        print "Tomer has 2 Friends, No one else has friends"
    def test_5_removeMultipleFriends(self):
        """Removing both friends in one POST empties the friend list again."""
        userNameList = getUserNames(RSMUser.objects.all())
        assert "Tomer" in userNameList
        self.client.login(password=u"1234",username="Tomer")
        print "Logged in as Tomer"
        self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
        print "Added Eran as friend"
        print "Added Alex as friend"
        self.client.post("/friends/remove/", {"friendsList":("Eran", "Alex")})
        #Check no one has any friends
        users = RSMUser.objects.all()
        for u in (users):
            n = len(u.friends.all())
            self.assertEquals(n,0)
        # NOTE(review): the message below is stale -- after the removal no one has friends
        print "Tomer has 2 Friends, No one else has friends"
from django.conf.urls.defaults import patterns
# URL routes for the my_friends app; view callables live in my_friends.views.
urlpatterns = patterns('my_friends.views',
    (r'^add/$','addFriend'),        # form listing users that can still be added
    (r'^all/$','viewAllFriends'),   # list the requester's current friends
    (r'^remove/$','removeFriend'),  # drop posted friends (and their group memberships)
    (r'^activeAdd/$','actualAdd'),  # persist the add-friend form submission
)
'''
Created on 21/04/2011
@author: Eran_Z
'''
from django.contrib import admin
from models import RSMUser
# Expose RSMUser in the Django admin with the default ModelAdmin.
admin.site.register(RSMUser)
| Python |
from django.template import RequestContext
from django.shortcuts import render_to_response
from models import RSMUser
from RSM.util import verifySessionValidity, getUserNames, getRSMUser
def addFriend(request):
    """Show the add-friend form, listing every user who is neither the
    requester nor already a friend."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    candidates = getUserNames(RSMUser.objects.all())
    candidates.remove(request.user.username)
    for name in getUserNames(getRSMUser(request.user.username).friends.all()):
        candidates.remove(name)
    return render_to_response("my_friends/addfriend.html",
                              {"users": candidates, "username": request.user.username},
                              context_instance=RequestContext(request))
def viewAllFriends(request):
    """List the requester's friends plus flags telling the template whether
    the add/remove controls should be shown."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    username = request.user.username
    friendsNames = getUserNames(getRSMUser(username).friends.all())
    hasToRemove = len(friendsNames) > 0
    # everyone except the requester is already a friend -> nothing left to add
    hasToAdd = len(RSMUser.objects.all()) != len(friendsNames) + 1
    return render_to_response("my_friends/viewfriends.html",
                              {"friends": friendsNames, "hasToRemove": hasToRemove, "hasToAdd": hasToAdd},
                              context_instance=RequestContext(request))
def removeFriend(request):
    """Drop the posted usernames from the requester's friend list, purge them
    from every group linked to the requester, then re-render the friends page."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    active_user = getRSMUser(request.user.username)
    for friend_name in request.POST.getlist(u'friendsList'):
        ex_friend = getRSMUser(friend_name)
        active_user.friends.remove(ex_friend)
        # keep group membership consistent: an ex-friend may not stay a member
        for group in active_user.groups.all():
            if ex_friend in group.members.all():
                group.members.remove(ex_friend)
    friendsNames = getUserNames(getRSMUser(request.user.username).friends.all())
    hasToRemove = len(friendsNames) > 0
    hasToAdd = len(RSMUser.objects.all()) != len(friendsNames) + 1
    return render_to_response("my_friends/viewfriends.html",
                              {"friends": friendsNames, "hasToRemove": hasToRemove, "hasToAdd": hasToAdd},
                              context_instance=RequestContext(request))
def actualAdd(request):
    """Persist the add-friend form: add every posted username to the
    requester's friend list, then re-render the friends page."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    active_user = getRSMUser(request.user.username)
    for friend_name in request.POST.getlist(u'friendsToAdd'):
        active_user.friends.add(getRSMUser(friend_name))
    friendsNames = getUserNames(getRSMUser(request.user.username).friends.all())
    hasToRemove = len(friendsNames) > 0
    hasToAdd = len(RSMUser.objects.all()) != len(friendsNames) + 1
    return render_to_response("my_friends/viewfriends.html",
                              {"friends": friendsNames, "hasToRemove": hasToRemove, "hasToAdd": hasToAdd},
                              context_instance=RequestContext(request))
| Python |
from django.db import models
class RSMGroup(models.Model):
    # A named, per-owner grouping of RSMUsers used to scope recommendations.
    owner = models.CharField(max_length=200)  # owner's username stored as plain text, not a FK
    groupName = models.CharField(max_length=200)
    members = models.ManyToManyField('my_friends.RSMUser')
    class Meta:
        # A user may not own two groups with the same name; names are NOT
        # globally unique across owners.
        unique_together = ("owner", "groupName")
    def __unicode__(self):
        return self.groupName.__str__()
| Python |
"""
Test Groups module
"""
from django.test import TestCase
from RSM.my_friends.models import RSMUser, User
#from django.template import RequestContext
from RSM.my_friends.views import getUserNames
from RSM.util import getRSMGroup
getRSMGroup = getRSMGroup
getUserNames = getUserNames
class GroupsTest(TestCase):
added = False
testCount = 1
    def setUp(self):
        """Create the three baseline users (Tomer, Eran, Alex) before each test."""
        print "\nGroups Test" + str(GroupsTest.testCount) + "\n"
        # NOTE(review): GroupsTest.added is never set to True anywhere, so this
        # guard is a no-op and the users are recreated per test -- confirm intent.
        if (GroupsTest.added):
            return
        #adding some users to the system
        user1 = User.objects.create_user("Tomer", "t@t.com", "1234")
        user1.save()
        RSMUser(user=user1).save()
        user2 = User.objects.create_user("Eran", "t@t.com", "1234")
        user2.save()
        RSMUser(user=user2).save()
        user3 = User.objects.create_user("Alex", "t@t.com", "1234")
        user3.save()
        RSMUser(user=user3).save()
def tearDown(self):
print "\nEnd Groups Test" + str(GroupsTest.testCount) + "\n"
GroupsTest.testCount += 1
    def test_1_basic(self):
        """Fresh users exist and have neither friends nor groups."""
        print "Adding users and checking initial state"
        #Checking init of test
        userNameList = getUserNames(RSMUser.objects.all())
        #Asserting users
        assert "Tomer" in userNameList
        assert "Eran" in userNameList
        assert "Alex" in userNameList
        assert "BlaBla" not in userNameList
        #Printing users
        for u in userNameList:
            print "Added RSMUser " + str(u) + " to the system"
        #Check no one has any friends or groups
        users = RSMUser.objects.all()
        for u in (users):
            n1 = len(u.friends.all())
            n2 = len(u.groups.all())
            self.assertEquals(n1,0)
            self.assertEquals(n2,0)
        #Test finished
        print "Added users to the system, none have friends, none are members of groups"
def test_2_addGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
#Adding a new group
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
print "Tomer has 1 groups, No one else has groups"
def test_3_AddFriendToGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
print "added Eran to testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),1)
print "Tomer has one group that has one member"
    def test_4_removeFriendFromGroup(self):
        """Removing a member leaves the group in place but empty."""
        userNameList = getUserNames(RSMUser.objects.all())
        assert "Tomer" in userNameList
        self.client.login(password=u"1234",username="Tomer")
        print "Logged in as Tomer"
        self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
        print "Added Eran as friend"
        self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
        print "added new group: testGroup1"
        self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
        print "added Eran to testGroup1"
        self.client.post("/groups/editfriends/", {"friends":"Eran","action":"removeFriend","groupname":"testGroup1"})
        print "removed Eran from testGroup1"
        #Check: Tomer still owns the group, but it has no members left
        users = RSMUser.objects.all()
        for u in (users):
            n = len(u.groups.all())
            if (u.user.username != "Tomer"):
                self.assertEquals(n,0)
            else:
                self.assertEquals(n,1)
        group = getRSMGroup("Tomer","testGroup1")
        self.assertEqual(len(group.members.all()),0)
        print "Tomer has one group that has no members"
def test_5_addMultipleFriendsToGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
print "Added Eran & Alex as friends"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"addFriend","groupname":"testGroup1"})
print "added Eran & Alex to testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),2)
print "Tomer has one group that has two members"
    def test_6_removeMultipleFriendsFromGroup(self):
        """Removing both members in one POST empties the group without deleting it."""
        userNameList = getUserNames(RSMUser.objects.all())
        assert "Tomer" in userNameList
        self.client.login(password=u"1234",username="Tomer")
        print "Logged in as Tomer"
        self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
        print "Added Eran & Alex as friends"
        self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
        print "added new group: testGroup1"
        self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"addFriend","groupname":"testGroup1"})
        print "added Eran & Alex to testGroup1"
        self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"removeFriend","groupname":"testGroup1"})
        print "removed Eran & Alex from testGroup1"
        #Check: Tomer still owns the group, but it has no members left
        users = RSMUser.objects.all()
        for u in (users):
            n = len(u.groups.all())
            if (u.user.username != "Tomer"):
                self.assertEquals(n,0)
            else:
                self.assertEquals(n,1)
        group = getRSMGroup("Tomer","testGroup1")
        self.assertEqual(len(group.members.all()),0)
        print "Tomer has one group that has no members"
def test_7_removeFriend(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
print "added Eran to testGroup1"
self.client.post("/friends/remove/", {"friendsList":"Eran"})
print "Removed Eran from friends, should be removed from group"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),0)
print "Tomer has one group that has no members"
def test_8_removeGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
#Adding a new group
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
#Removing a new group
self.client.post("/groups/remove/", {"groupname":"testGroup1"})
print "removed new group: testGroup1"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
self.assertEquals(n,0)
print "Tomer has 0 groups, No one else has groups"
def test_9_removeGroupWithFriends(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
print "added Eran to testGroup1"
#Removing a new group
self.client.post("/groups/remove/", {"groupname":"testGroup1"})
print "removed new group: testGroup1"
#Adding the group it should not include Eran
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),0)
print "Tomer has one group that has no members" | Python |
'''
Created on May 26, 2011
@author: dima
'''
from django.conf.urls.defaults import patterns
# URL -> view-name table for the my_groups app (old-style string view names,
# resolved against the 'my_groups.views' prefix by patterns()).
_GROUP_URLS = (
    (r'^allGroups/$', 'allGroups'),
    (r'^create/$', 'createGroup'),
    (r'^remove/$', 'removeGroup'),
    (r'^activeAdd/$', 'actualAdd'),
    (r'^manage/$', 'editGroup'),
    (r'^editfriends/$', 'editFrendsInGroup'),
)

urlpatterns = patterns('my_groups.views', *_GROUP_URLS)
from django.template import RequestContext
from django.db import IntegrityError
from django.shortcuts import render_to_response, redirect
from models import RSMGroup
from RSM.util import verifySessionValidity, getRSMGroup, getUserNames, getRSMUser, isSafeName, getExtend
import hashlib
def allGroups(request):
    """Render the logged-in user's groups as (name, md5-of-name) pairs."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    user = getRSMUser(request.user.username)
    groups = []
    for g in user.groups.all():
        # md5 of the name is used as a URL-safe slug in the template.
        groups.append((g.groupName, hashlib.md5(g.groupName).hexdigest()))
    return render_to_response(
        "my_groups/viewgroups.html",
        {"groups": groups, "username": request.user.username},
        context_instance=RequestContext(request))
def createGroup(request):
    """Show the add-group form (session must be valid)."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    context = {"username": request.user.username}
    return render_to_response("my_groups/addgroup.html", context,
                              context_instance=RequestContext(request))
def actualAdd(request):
    """POST handler that actually creates a new group for the current user."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    owner = request.user.username
    currentUser = getRSMUser(owner)
    groupName = request.POST.get(u'groupname')
    if not (groupName and isSafeName(groupName)):
        return render_to_response(
            "basic/errorPage.html",
            {'extend': getExtend(request), 'message': "Illegal group name."},
            context_instance=RequestContext(request))
    try:
        newGroup = RSMGroup(owner=owner, groupName=groupName)
        newGroup.save()
    except IntegrityError:
        # Duplicate (owner, groupName) violates the model's uniqueness.
        return render_to_response(
            "basic/errorPage.html",
            {'extend': getExtend(request), 'message': "Group with this name already exists."},
            context_instance=RequestContext(request))
    currentUser.groups.add(newGroup)
    return redirect('/groups/allGroups/')
def removeGroup(request):
    """Detach the POSTed group from the current user and delete it."""
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    owner = getRSMUser(request.user.username)
    group = getRSMGroup(request.user.username, request.POST.get(u'groupname'))
    owner.groups.remove(group)
    group.delete()
    return redirect('/groups/allGroups/')
def editGroup(request):
    """Render the edit page for one group: which friends are in / out of it.

    Expects POST['group'] with the group name.
    """
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    groupName = request.POST.get(u'group')
    group = getRSMGroup(request.user.username, groupName)
    currentUser = getRSMUser(request.user.username)
    members = set(getUserNames(group.members.all()))
    allFriends = getUserNames(currentUser.friends.all())
    # Robustness fix: the original used list.remove(), which raises
    # ValueError if a group member is no longer in the friends list
    # (e.g. unfriended after being added to the group).
    friends_not_in_group = [u for u in allFriends if u not in members]
    friends_in_group = [u for u in allFriends if u in members]
    variables = {"fgroups": friends_in_group,
                 "fngroups": friends_not_in_group,
                 "hasToRemove": bool(friends_in_group),
                 "hasToAdd": bool(friends_not_in_group),
                 "username": request.user.username,
                 "group": groupName}
    return render_to_response("my_groups/edit_group.html", variables,
                              context_instance=RequestContext(request))
def editFrendsInGroup(request):
    """Add or remove the POSTed friends to/from a group, then re-render the
    edit page.

    Expects POST['groupname'], POST['action'] ('addFriend' or anything else
    meaning remove), and a 'friends' list.  (Function-name typo kept: it is
    referenced by name from urls.py.)
    """
    invalid = verifySessionValidity(request)
    if invalid:
        return invalid
    groupName = request.POST.get(u'groupname')
    group = getRSMGroup(request.user.username, groupName)
    shouldAdd = request.POST.get(u'action') == "addFriend"
    for friendName in request.POST.getlist(u'friends'):
        friend = getRSMUser(friendName)
        if shouldAdd:
            group.members.add(friend)
        else:
            group.members.remove(friend)
    currentUser = getRSMUser(request.user.username)
    members = set(getUserNames(group.members.all()))
    allFriends = getUserNames(currentUser.friends.all())
    # Robustness fix: the original used list.remove(), which raises
    # ValueError if a group member is no longer in the friends list.
    friends_not_in_group = [u for u in allFriends if u not in members]
    friends_in_group = [u for u in allFriends if u in members]
    variables = {"fgroups": friends_in_group,
                 "fngroups": friends_not_in_group,
                 "hasToRemove": bool(friends_in_group),
                 "hasToAdd": bool(friends_not_in_group),
                 "username": request.user.username,
                 "group": groupName}
    return render_to_response("my_groups/edit_group.html", variables,
                              context_instance=RequestContext(request))
| Python |
#!/usr/bin/env pypy
import os, sys, logging, re
import argparse
import fnmatch
# The two build flavours this script can switch between.
configurations = {'lite', 'pro'}

# Per-configuration location of the Java package sources.
package_dirs = {
    'lite': ('src/cx/hell/android/pdfview',),
    'pro': ('src/cx/hell/android/pdfviewpro',)
}

# Strings rewritten in source files when switching configuration.  Entries
# are positional: index i of one configuration replaces index i of another.
file_replaces = {
    'lite': (
        'cx.hell.android.pdfview.',
        '"cx.hell.android.pdfview"',
        'package cx.hell.android.pdfview;',
        'android:icon="@drawable/pdfviewer"',
    ),
    'pro': (
        'cx.hell.android.pdfviewpro.',
        '"cx.hell.android.pdfviewpro"',
        'package cx.hell.android.pdfviewpro;',
        'android:icon="@drawable/apvpro_icon"',
    ),
}
def make_comment(file_type, line):
    """Comment out *line* for the given file type.

    Lines that are already commented out are returned unchanged.  Raises
    for unknown file types.
    """
    if file_type in ('java', 'c'):
        if line.startswith('//'):
            return line
        return '// ' + line
    if file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!--'):
            return line
        return '<!-- ' + stripped + ' -->\n'
    raise Exception("unknown file type: %s" % file_type)
def remove_comment(file_type, line):
    """Inverse of make_comment: strip the comment marker if present.

    Uncommented lines are returned unchanged.  Raises for unknown types.
    """
    if file_type in ('java', 'c'):
        return line[3:] if line.startswith('// ') else line
    if file_type in ('html', 'xml'):
        stripped = line.strip()
        if stripped.startswith('<!-- ') and stripped.endswith(' -->'):
            return stripped[5:-4] + '\n'
        return line
    raise Exception("unknown file type: %s" % file_type)
def handle_comments(conf, file_type, lines, filename):
    """Process '#ifdef <conf>' comment regions in *lines*.

    Inside a single region for the active configuration, lines are
    uncommented; inside any other (or nested) region they are commented
    out.  The #ifdef/#endif marker lines themselves pass through as-is.
    """
    re_cmd_starts = re.compile(r'(?:(//|<!--))\s+#ifdef\s+(?P<def>[a-zA-Z]+)')
    re_cmd_ends = re.compile(r'(?:(//|<!--))\s+#endif')
    out = []
    stack = []  # currently-open #ifdef names, innermost last
    for idx, line in enumerate(lines):
        start = re_cmd_starts.search(line)
        if start:
            required_def = start.group('def')
            logging.debug("line %s:%d %s matches as start of %s" % (filename, idx+1, line.strip(), required_def))
            stack.append(required_def)
            out.append(line)
        elif re_cmd_ends.search(line):
            logging.debug("line %s:%d %s matches as endif" % (filename, idx+1, line.strip()))
            stack.pop()
            out.append(line)
        elif not stack:
            out.append(line)
        elif len(stack) == 1 and stack[0] == conf:
            out.append(remove_comment(file_type, line))
        else:
            out.append(make_comment(file_type, line))
    assert len(out) == len(lines)
    return out
def find_files(dirname, name):
    """Recursively collect paths under *dirname* whose basename matches the
    fnmatch pattern *name*."""
    found = []
    for root, _dirs, files in os.walk(dirname):
        found.extend(os.path.join(root, f) for f in fnmatch.filter(files, name))
    return found
def fix_package_dirs(conf):
    """Ensure the package source dirs for *conf* exist, renaming another
    configuration's dir into place when necessary.

    Raises if no rename candidate is found, or if dirs for more than one
    other configuration exist (ambiguous rename source).
    """
    for i, dirname in enumerate(package_dirs[conf]):
        logging.debug("trying to restore %s" % dirname)
        if os.path.exists(dirname):
            if os.path.isdir(dirname):
                # Already switched to this configuration; nothing to do.
                logging.debug(" already exists")
                continue
            else:
                # A plain file shadows the expected dir; log and skip
                # (renaming over it would fail anyway).
                logging.error(" %s already exists, but is not dir" % dirname)
                continue
        # find other name: dir i of some OTHER configuration holds the
        # sources and must be renamed to this configuration's path.
        found_dirname = None
        for other_conf, other_dirnames in package_dirs.items():
            other_dirname = other_dirnames[i]
            if other_conf == conf: continue # skip this conf when looking for other conf
            if os.path.isdir(other_dirname):
                if found_dirname is None:
                    found_dirname = other_dirname
                else:
                    # source dir already found :/
                    raise Exception("too many possible dirs for this package: %s, %s" % (found_dirname, other_dirname))
        if found_dirname is None:
            raise Exception("didn't find %s" % dirname)
        # now rename found_dirname to dirname
        os.rename(found_dirname, dirname)
        logging.debug("renamed %s to %s" % (found_dirname, dirname))
def handle_comments_in_files(conf, file_type, filenames):
    """Apply handle_comments() to each file, rewriting only changed files.

    Fix: the original opened the files for reading without ever closing
    them; both handles are now managed with context managers.
    """
    for filename in filenames:
        with open(filename) as f:
            lines = f.readlines()
        new_lines = handle_comments(conf, file_type, lines, filename)
        if lines != new_lines:
            logging.debug("file %s comments changed" % filename)
            with open(filename, 'w') as f:
                f.write(''.join(new_lines))
def replace_in_files(conf, filenames):
    """Replace other configurations' package strings with *conf*'s strings
    in each file, rewriting only files that actually change.

    Fix: the original opened the files for reading without ever closing
    them; both handles are now managed with context managers.
    """
    other_confs = [c for c in file_replaces.keys() if c != conf]
    for filename in filenames:
        with open(filename) as f:
            lines = f.readlines()
        new_lines = []
        for line in lines:
            new_line = line
            # Positional mapping: entry i of another conf -> entry i of conf.
            for i, target_string in enumerate(file_replaces[conf]):
                for other_conf in other_confs:
                    new_line = new_line.replace(file_replaces[other_conf][i], target_string)
            new_lines.append(new_line)
        if new_lines != lines:
            logging.debug("file %s changed, writing..." % filename)
            with open(filename, 'w') as f:
                f.write(''.join(new_lines))
        else:
            logging.debug("file %s didn't change, no need to rewrite" % filename)
def _replace_and_comment(conf, file_type, filenames):
    # Shared body of the fix_* helpers: string replacement, then #ifdef
    # comment processing, over the same file list.
    replace_in_files(conf, filenames)
    handle_comments_in_files(conf, file_type, filenames)

def fix_java_files(conf):
    """Rewrite package strings and #ifdef regions in all Java sources."""
    _replace_and_comment(conf, 'java', find_files('src', name='*.java'))

def fix_xml_files(conf):
    """Rewrite package strings and #ifdef regions in all XML files."""
    _replace_and_comment(conf, 'xml', find_files('.', name='*.xml'))

def fix_html_files(conf):
    """Rewrite package strings and #ifdef regions in bundled HTML."""
    _replace_and_comment(conf, 'html', find_files('res', name='*.html'))

def fix_c_files(conf):
    """Rewrite package strings and #ifdef regions in native C sources."""
    _replace_and_comment(conf, 'c', find_files('jni/pdfview2', name='*.c'))
    _replace_and_comment(conf, 'c', find_files('jni/pdfview2', name='*.h'))

def fix_resources(conf):
    """Placeholder: resources need no per-configuration fixing yet."""
    pass
def main():
    """Parse arguments and switch the project to the requested configuration."""
    logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
    parser = argparse.ArgumentParser(description='Switch project configurations')
    parser.add_argument('--configuration', dest='configuration', default='lite')
    args = parser.parse_args()
    # Must be run from the project root.
    if not os.path.exists('AndroidManifest.xml'):
        raise Exception('android manifest not found, please run this script from main project directory')
    conf = args.configuration
    if conf not in configurations:
        raise Exception("invalid configuration: %s" % conf)
    # Fixed order: dirs first, then per-language rewrites, then resources.
    for step in (fix_package_dirs, fix_java_files, fix_xml_files,
                 fix_html_files, fix_c_files, fix_resources):
        step(conf)

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing.

    Serves canned capture files for the known API paths; 404 otherwise.
    Fixes: removed unused query parsing (the old query_keys computed the
    first CHARACTER of each key and was never read), and the response
    file handle is now closed after streaming.
    """

    # Canned responses keyed by URL path.
    _CAPTURES = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }

    def do_GET(self):
        """Stream the canned response for self.path, or 404."""
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        response = self.handle_url(url)
        if response is not None:
            self.send_200()
            try:
                shutil.copyfileobj(response, self.wfile)
            finally:
                response.close()  # was leaked before
            self.wfile.close()

    do_POST = do_GET

    def handle_url(self, url):
        """Return an open file with the canned body, or None (404 sent)."""
        path = self._CAPTURES.get(url.path)
        if path is None:
            self.send_error(404)
            return None
        logging.warn('Using: %s' % path)
        return open(path)

    def send_200(self):
        """Send a 200 header with an XML content type."""
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
captures = os.listdir(TYPESDIR)
for f in captures:
basename = f.split('.')[0]
javaname = ''.join([c.capitalize() for c in basename.split('_')])
fullpath = os.path.join(TYPESDIR, f)
typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# host:port used for all API connections
SERVER = 'api.foursquare.com:80'
# oauth requests are always sent form-encoded
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Extract (oauth_token, oauth_token_secret) from an authexchange XML
    response body."""
    token = re.search('<oauth_token>(.*)</oauth_token>', auth_response).groups()[0]
    secret = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                       auth_response).groups()[0]
    return (token, secret)
def create_signed_oauth_request(username, password, consumer):
    """Build and HMAC-SHA1-sign an OAuth POST for the authexchange endpoint,
    carrying the foursquare username/password as request parameters."""
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
        parameters=dict(fs_username=username, fs_password=password))
    # Signed with token=None: this IS the request that obtains the token.
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
    return oauth_request
def main():
    """Fetch the oAuth-protected URL given in argv[1] and print the body.

    Credentials come from ~/.oget (4 lines: consumer key, consumer secret,
    username, password).  After a successful token exchange the file is
    rewritten in a 6-line form with the access token and secret appended,
    so later runs skip the exchange.
    """
    url = urlparse.urlparse(sys.argv[1])
    # Nevermind that the query can have repeated keys.
    parameters = dict(urlparse.parse_qsl(url.query))
    password_file = open(os.path.join(user.home, '.oget'))
    lines = [line.strip() for line in password_file.readlines()]
    if len(lines) == 4:
        # No cached access token yet: the authexchange below will run.
        cons_key, cons_key_secret, username, password = lines
        access_token = None
    else:
        # 6-line form: token and secret cached by a previous run.
        cons_key, cons_key_secret, username, password, token, secret = lines
        access_token = oauth.OAuthToken(token, secret)
    consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
    if not access_token:
        oauth_request = create_signed_oauth_request(username, password, consumer)
        connection = httplib.HTTPConnection(SERVER)
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
                           body=oauth_request.to_postdata(), headers=headers)
        auth_response = connection.getresponse().read()
        token = parse_auth_response(auth_response)
        access_token = oauth.OAuthToken(*token)
        # Cache the token by rewriting ~/.oget in its 6-line form.
        open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
            cons_key, cons_key_secret, username, password, token[0], token[1])))
    # Sign and send the actual request with the (possibly fresh) token.
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
        access_token, http_method='POST', http_url=url.geturl(),
        parameters=parameters)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
    connection = httplib.HTTPConnection(SERVER)
    connection.request(oauth_request.http_method, oauth_request.to_url(),
                       body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
    print connection.getresponse().read()
    #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
# Template for the whole generated Java parser class.  Filled via
# %-substitution with the dict from Replacements() plus a 'stanzas' entry
# (the generated if/else-if chain).
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
# Per-attribute stanza templates; each opens with "} else" so they chain,
# and GenerateClass() strips that prefix from the first one.
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Generate a parser class for the capture file named in argv[1]."""
    parsed = common.WalkNodesForAttributes(sys.argv[1])
    GenerateClass(*parsed)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one attribute stanza."""
    def camel(s):
        # some_name -> SomeName
        return ''.join(part.capitalize() for part in s.split('_'))

    type_name = camel(top_node_name)
    camel_name = camel(name)
    child = children[0]
    # Group children get a parser named after the child type when given,
    # otherwise one derived from the attribute name minus its last char.
    if child:
        sub_parser = child + 'Parser'
    else:
        sub_parser = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': camel_name.lower().capitalize(),
        'field_name': 'm' + camel_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser,
        'sub_type': child,
    }
# Run as a script: generate a parser for the capture file in argv[1].
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# XML 'type' attribute markers understood by the generator.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"

# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']

# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}

# Imports added to every generated class (none by default).
DEFAULT_CLASS_IMPORTS = [
]

# Extra per-class imports for generated Java types; all currently disabled.
CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    #     'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}

# Nested ("complex") types: WalkNodesForAttributes skips their subtrees.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]

# Every recognized 'type' value: the complex types plus plain booleans.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
        <attribute>value</attribute>
    </venue>

    Returns:
        type_name - The java-style name the top node will have. "Venue"
        top_node_name - unadultured name of the xml stanza, probably the type of
            java class we're creating. "venue"
        attributes - {'attribute': 'value'}
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    # level > 0 means we are inside a complex child whose subtree we skip.
    level = 0
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level-=1
                logging.warn('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(level), node))
                level+=1
                continue
            # Other events (e.g. CHARACTERS) inside a skipped subtree fall
            # through and are ignored by the START_ELEMENT test below.
        if event == pulldom.START_ELEMENT:
            logging.warn('Parsing: ' + node.tagName)
            # Get the type name to use.
            if type_name is None:
                # First element seen: this is the top node of the document.
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warn('Found Top Node Name: ' + top_node_name)
                continue
            # getAttribute returns '' when the attribute is absent.
            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warn('Found Complex: ' + node.tagName)
                level = 1
            elif typ not in TYPES:
                # Unknown/missing type declarations default to String.
                logging.warn('Found String: ' + typ)
                typ = STRING
            else:
                logging.warn('Found Type: ' + typ)
            logging.warn('Adding: ' + str((node, typ)))
            # setdefault: only the first occurrence of a tag name is kept.
            attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import ConfigParser
from ConfigParser import NoOptionError
import config
import getopt
import os
import sys
import utils
import time
def usage():
    """Print johannes' command-line help to stdout."""
    message = """
Welcome to johannes, the ugliest
downloading/unpacking/configuring/building and installation system of
them all. It could save you a lot of time though. This instance of
johannes will get, build and install the following: python, numpy,
wxpython, matplotlib, cmake, dcmtk, vtk, vtktudoss, vtkdevide, itk,
itktudoss, itkvtkglue, devide
Please read the included README.txt file NOW.
Build method A (the default) is as follows: Before starting
johannes.py, first run bootstrap_stage1.sh and bootstrap_stage2.sh to
download and install python. After that, run johannes as follows:
/you/new/python johannes.py -w working_directory
Options are as follows:
-w, --working-dir : specify working directory [REQUIRED]
-h, --help : show this help
-m, --mode : working mode, 'everything' (default),
'clean_build', 'get_only' or 'configure_only'
-p, --install-packages : specify comma-separated list of packages to work on,
default all. Example: -p "CMake,CableSwig"
Correct capitalisation IS important!
-d, --auto-deps : Automatically build per install package dependencies.
The default is not to do this, i.e. you have to
specify all required packages on the command-line
or in the johannes.cfg project file.
--no-prereq-check : do NOT do prerequisites check.
-v, --versions : display installed versions of all packages.
-t, --target : Specify a package to execute the 'mode' action on.
All build stages are performed on the non-target
packages. Does not work for auto-deps.
You can also specify project-specific options (for example specifying
install packages) in a johannes.cfg file placed at the top-level
of your working directory. See example-johannes.cfg for more info.
All of this ugliness is copyright 2006-2011 Charl P. Botha http://cpbotha.net/
and is hereby put under a BSD license.
"""
    print message
def posix_prereq_check(working_dir):
    """Perform posix system check for prerequisite software.

    Largest part of this checking is done in the second bootstrap
    shell script (executed before this file).  Here we check for basic
    stuff like cvs, svn and patch, plus the working-dir layout.
    """
    # Tool checks short-circuit: the first missing tool skips the rest.
    tools_ok = utils.find_command_with_ver(
        'CVS', '%s -v' % (config.CVS,),
        '\(CVS\)\s+(.*)\s+')
    tools_ok = tools_ok and utils.find_command_with_ver(
        'Subversion (SVN)', '%s --version' % (config.SVN,),
        'version\s+(.*)$')
    tools_ok = tools_ok and utils.find_command_with_ver(
        'patch', '%s -v' % (config.PATCH,),
        '^patch\s+(.*)$')
    # now check that working_dir contains the required subdirs
    dirs_ok = True
    for sub in ('archive', 'build', 'inst'):
        cdir = os.path.join(working_dir, sub)
        if os.path.isdir(cdir):
            utils.output('%s exists.' % (cdir,))
        else:
            utils.output('%s does not exist.' % (cdir,))
            dirs_ok = False
    return tools_ok and dirs_ok
def windows_prereq_check(working_dir):
    """Perform Windows system check for prerequisite software and
    directory structure.

    Returns True only when all required tools are present AND working_dir
    already contains the archive/build/inst subdirectories.
    """
    utils.output("Windows prerequisites check", 70, '#')
    # Tool checks short-circuit: the first missing tool skips the rest.
    v = utils.find_command_with_ver(
        'MS Visual Studio', '%s /?' % (config.DEVENV,),
        'Visual Studio Version (.*)\.$')
    # CMake check disabled — presumably installed by johannes itself
    # (TODO confirm before re-enabling).
    #v = v and utils.find_command_with_ver(
    #    'CMake', '%s --version' % (config.CMAKE_BINPATH,),
    #    '^cmake version\s+(.*)$')
    v = v and utils.find_command_with_ver(
        'CVS', '%s -v' % (config.CVS,),
        '\((CVS|CVSNT)\)\s+(.*)\s+')
    v = v and utils.find_command_with_ver(
        'Subversion (SVN)', '%s --version' % (config.SVN,),
        'version\s+(.*)$')
    v = v and utils.find_command_with_ver(
        'GIT', '%s --version' % (config.GIT,),
        'version\s+(.*)$')
    v = v and utils.find_command_with_ver(
        'patch', '%s -v' % (config.PATCH,),
        '^patch\s+(.*)$')
    # now check that working_dir contains the required subdirs
    dv = True
    for wsub in ['archive', 'build', 'inst']:
        cdir = os.path.join(working_dir, wsub)
        if os.path.isdir(cdir):
            msg = '%s exists.' % (cdir,)
        else:
            msg = '%s does not exist.' % (cdir,)
            dv = False
        utils.output(msg)
    return v and dv
def main():
if len(sys.argv) < 2:
usage()
else:
start_time = time.time()
rpad = 60
rpad_char = '+'
# this is the default list of install packages
#
# you can override this by:
# - specifying packages on the johannes command line
# - specifying packages in the working dir johannes.py
# (command line has preference over config file)
#
# capitalisation has to match the capitalisation of your
# install package class, name of install package module is
# exactly that, but all lower case, so e.g. MyModule will
# become: install_packages.ip_mymodule.MyModule()
#
# johannes will:
# - attempt to import the ip_name from install_packages
# - instantiate ip_name.Name
#
ip_names = [
'pip',
'NumPy',
'WXPython',
'matplotlib',
'CMake',
'DCMTK',
'VTK56',
'IPython',
'VTKTUDOSS',
'ITK',
'SWIG',
'CableSwig',
'WrapITK',
'ItkVtkGlue',
'itkPyBuffer',
'ITKTUDOSS',
'GDCM',
'DeVIDE',
'VTKDEVIDE',
'SetupEnvironment'
]
try:
optlist, args = getopt.getopt(
sys.argv[1:], 'hm:p:dw:vt:',
['help', 'mode=', 'install-packages=',
'auto-deps',
'working-dir=',
'no-prereq-check', 'versions'
'target='])
except getopt.GetoptError,e:
usage()
return
mode = 'everything'
#ip_names = None
working_dir = None
profile = 'default'
no_prereq_check = False
ip_names_cli = False
auto_deps = False
target = None
for o, a in optlist:
if o in ('-h', '--help'):
usage()
return
elif o in ('-m', '--mode'):
if a in ('clean', 'clean_build'):
mode = 'clean_build'
else:
mode = a
elif o in ('--install-packages'):
# list of package name to perform the action on
ip_names = [i.strip() for i in a.split(',')]
# remember that the user has specified ip_names on the command-line
ip_names_cli = True
elif o in ('-d', '--auto-deps'):
auto_deps = True
elif o in ('-w', '--working-dir'):
working_dir = a
elif o in ('--profile'):
profile = a
elif o in ('--no-prereq-check'):
no_prereq_check = True
elif o in ('-v', '--versions'):
mode = 'show_versions'
elif o in ('-t', '--target'):
target = a
# we need at LEAST a working directory
if not working_dir:
usage()
return
# init config (DURR)
config.init(working_dir, profile)
# set some variables we'll need to check later depending on
# the configuration
ip_dirs = []
# now try to read johannes config file from the working dir
cp = ConfigParser.ConfigParser()
# returns list of filenames successfully parsed
cfgfns = cp.read(os.path.join(working_dir, 'johannes.cfg'))
if cfgfns and cp.has_section('default'):
if not ip_names_cli:
# first packages that need to be installed
# we only do this if the user has NOT specified install
# packages on the command line.
try:
ip_names = [i.strip()
for i in cp.get(
'default', 'packages').split(',')]
except NoOptionError:
pass
# also try to read extra install package paths
# this is also a comma separated list
try:
ip_dirs = [i.strip()
for i in cp.get(
'default', 'ip_dirs').split(',')]
except NoOptionError:
pass
# if user is asking for versions, we don't do the
# prerequisites check as we're not going to build anything
if mode == 'show_versions':
no_prereq_check = True
if os.name == 'nt' and not no_prereq_check:
if not windows_prereq_check(working_dir):
utils.output(
'Windows prerequisites do not check out. '
'Fix and try again.', 70, '-')
return
else:
utils.output(
'Windows prerequisites all good.', 70, '-')
elif os.name == 'posix' and not no_prereq_check:
if not posix_prereq_check(working_dir):
utils.output(
'Posix prerequisites do not check out. '
'Fix and try again.', 70, '-')
return
else:
utils.output(
'Posix prerequisites all good.', 70, '-')
# In case of a target, check whether the target is actually specified
# in the ip list (does not check dependencies in case of auto-deps)
if target != None:
if not target in ip_names:
utils.error("Target '%s' was not found in the install package list." % target)
# we're going to do some imports, so let's set the sys.path
# correctly.
# 1. first the default install packages dir config.ip_dir
sys.path.insert(0, config.ip_dir)
# 2. insert the extra specified paths BEFORE that, so they get
# preference
for uip_dir in ip_dirs:
sys.path.insert(0, uip_dir)
# now import only the specified packages
ip_instance_list = []
imported_names = []
def import_ip(ip_name):
# don't import more than once
if ip_name in imported_names:
return
# turn Name into ip_name
ip_name_l = 'ip_' + ip_name.lower()
# import the module, but don't instantiate the ip class yet
ip_m = __import__(ip_name_l)
# import dependencies first if user has specified
# auto-deps.
if auto_deps:
for dep in ip_m.dependencies:
import_ip(dep)
# instantiate ip_name.Name
ip = getattr(ip_m, ip_name)()
ip_instance_list.append(ip)
imported_names.append(ip_name)
print "%s imported from %s." % \
(ip_name, ip_m.__file__)
# import all ip_names, including dependencies
for ip_name in ip_names:
import_ip(ip_name)
# now check for dependencies and error if necessary
# (in the case of auto_deps this will obviously be fine)
deps_errors = []
for ip in ip_instance_list:
n = ip.__class__.__name__
# there must be a more elegant way to get the module instance?
deps = sys.modules[ip.__module__].dependencies
for d in deps:
# remember that if a package asks for "VTK", "VTK561" is also fine
d_satisfied = False
for ip_name in imported_names:
if ip_name.startswith(d):
d_satisfied = True
# we don't have to finish more loops
break
elif ip_name == n:
# this means we have reached the module whose deps
# we're checking without satisfying dependency d,
# which also means dependency problems, so we
# can jut break out of the for loop
break
if not d_satisfied:
deps_errors.append('>>>>> Unsatisfied dependency: %s should be specified before %s' % (d, n))
if deps_errors:
print "\n".join(deps_errors)
utils.error("Unsatisfied dependencies. Fix and try again.")
def get_stage(ip, n):
utils.output("%s :: get()" % (n,), rpad, rpad_char)
ip.get()
def unpack_stage(ip, n):
utils.output("%s :: unpack()" % (n,), rpad, rpad_char)
ip.unpack()
def configure_stage(ip, n):
utils.output("%s :: configure()" % (n,), rpad, rpad_char)
ip.configure()
def build_stage(ip, n):
utils.output("%s :: build()" % (n,), rpad, rpad_char)
ip.build()
def install_stage(ip, n):
utils.output("%s :: install()" % (n,), rpad, rpad_char)
ip.install()
def all_stages(ip, n):
get_stage(ip, n)
unpack_stage(ip, n)
configure_stage(ip, n)
build_stage(ip, n)
install_stage(ip, n)
if mode == 'show_versions':
utils.output('Extracting all install_package versions.')
print "python: %d.%d.%d (%s)" % \
(sys.version_info[0:3] +
(config.PYTHON_EXECUTABLE,))
for ip in ip_instance_list:
n = ip.__class__.__name__
if not n in ip_names:
# n is a dependency, so do everything
utils.output("%s (DEPENDENCY)" % (n,), 70, '#')
all_stages(ip, n)
elif target != None and target != n:
# A target has been specified (but this ip is not it),
# so do everything for all other install packages we encounter.
utils.output("%s (NON-TARGET)" % (n,), 70, '#')
all_stages(ip, n)
elif mode == 'get_only':
utils.output("%s GET_ONLY" % (n,), 70, '#')
utils.output("%s" % (n,), 70, '#')
get_stage(ip, n)
elif mode == 'unpack_only':
utils.output("%s UNPACK_ONLY" % (n,), 70, '#')
utils.output("%s" % (n,), 70, '#')
unpack_stage(ip, n)
elif mode == 'configure_only':
utils.output("%s CONFIGURE_ONLY" % (n,), 70, '#')
utils.output("%s" % (n,), 70, '#')
configure_stage(ip, n)
elif mode == 'everything':
utils.output("%s" % (n,), 70, '#')
all_stages(ip, n)
elif mode == 'clean_build':
utils.output("%s CLEAN_BUILD" % (n,), 70, '#')
ip.clean_build()
elif mode == 'show_versions':
print '%s: %s' % (n, ip.get_installed_version())
elif mode == 'rebuild':
utils.output("%s REBUILD" % (n,), 70, '#')
# clean up
ip.clean_build()
# rebuild (executes all stages, as previous
# stages are required and user will likely
# need an install also)
all_stages(ip, n)
elif mode == 'reinstall':
utils.output("%s REINSTALL" % (n,), 70, '#')
# clean up
ip.clean_install()
# reinstall
all_stages(ip, n)
else:
utils.output("%s CUSTOM MODE" % (n,), 70, '#')
if hasattr(ip, mode):
utils.output("%s :: %s()" % (n, mode), rpad, rpad_char)
getattr(ip, mode)()
else:
utils.error("Mode not found: %s" % (mode,))
if mode != 'show_versions':
# Print elapsed time and final message
t = time.time() - start_time
utils.output("Execution time (h:mm:ss): %d:%02d:%02d" %
(int(t/3600), int((t%3600)/60), t%60))
utils.output("Now please read the RESULTS section of README.txt!")
# script entry point
if __name__ == '__main__':
    main()
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
###################################################################
# the following programmes should either be on your path, or you
# should specify the full paths here.
# Microsoft utility to rebase files.
REBASE = "rebase"
MAKE_NSIS = "makensis"
STRIP = "strip"
CHRPATH = "chrpath"
FILE = "file"
# end of programmes ###############################################
import config
import getopt
import os
import re
import sys
import shutil
import tarfile
import utils
PPF = "[*** DRE build installer ***]"
S_PPF = "%s =====>>>" % (PPF,) # used for stage headers
class BDIPaths:
    """Namespace holding the build-installer paths; filled in by main()."""
    # basename of the distribution directory (e.g. 'devide-re')
    dre_basename = None
    # full destination path of the DRE tree inside the working dir
    dre_dest = None
def copy_inst_to_dre():
"""Copy the dre top-level dir (inst) to final devide-re
"""
print S_PPF, 'Copying INST to DRE.'
if os.path.isdir(BDIPaths.dre_dest):
print PPF, 'DRE dir already present. Skipping step.'
return
print PPF, 'Working ...'
# using ignore callback to give progress
def _logpath(path, names):
# would be better to print only if the top-level dir within
# config.inst_dir changes...
print "Copying %s." % (path,)
return []
# copy symlinks as symlinks!
shutil.copytree(config.inst_dir, BDIPaths.dre_dest,
symlinks=True, ignore=_logpath)
print PPF, 'DONE copying INST to DRE.'
def postproc_sos():
    """Strip and de-rpath ELF binaries in the DRE tree (posix only).

    Uses the 'file' utility to detect unstripped ELF objects and runs
    STRIP on them; runs CHRPATH --delete on anything whose name looks
    like a shared object (*.so or *.so.N).
    """
    if os.name != 'posix':
        return

    print S_PPF, "postproc_sos (strip, chrpath)"

    # matches 'file' output for unstripped ELF objects; group 1 is the
    # filename part before the colon
    res = re.compile(
        "^(.*):.*ELF.*(executable|relocatable|shared object).*, not stripped"
    )
    # matches shared-object filenames: *.so or *.so.<something>
    rec = re.compile('.*\.(so$|so\.)')

    # use 'file' command to find all strippable files
    print PPF, "Creating complete file list..."
    all_files, _ = utils.find_files(BDIPaths.dre_dest, '.*')

    print PPF, "Searching for strippable / chrpathable files"
    for f in all_files:
        status, output = utils.get_status_output('%s %s' % (FILE, f))
        mo = re.match(res, output)
        stripped = chrpathed = False
        if mo:
            sfn = mo.groups()[0]
            ret = os.system('%s %s' % (STRIP, sfn))
            if ret != 0:
                print "Error stripping %s." % (sfn,)
            else:
                stripped = True

        # now check if f can be chrpathed
        if re.match(rec, f):
            # remove rpath information
            ret = os.system('%s --delete %s' % (CHRPATH, f))
            if ret != 0:
                print "Error chrpathing %s." % (f,)
            else:
                chrpathed = True

        # report which actions were performed on this file
        if stripped or chrpathed:
            actions = []
            if stripped:
                actions.append('STRIPPED')
            if chrpathed:
                actions.append('CHRPATHED')
            print "%s: %s" % (f, ','.join(actions))
def rebase_dlls():
    """Rebase all DLLs in the distdevide tree on Windows.

    Writes the list of pyd/dll files (excluding sqlite3.dll) to
    dll_list.txt in the working dir and hands it to REBASE.

    @raise RuntimeError: if rebase does not return its expected exit
    code (99).
    """
    if os.name == 'nt':
        print S_PPF, "rebase_dlls"
        # sqlite3.dll cannot be rebased; it even gets corrupted in the
        # process! see this test:
        # C:\TEMP>rebase -b 0x60000000 -e 0x1000000 sqlite3.dll
        # REBASE: *** RelocateImage failed (sqlite3.dll).
        # Image may be corrupted

        # get list of pyd / dll files, excluding sqlite3
        # this returns full path names
        so_files, excluded_files = utils.find_files(
            BDIPaths.dre_dest, '.*\.(pyd|dll)', ['sqlite3\.dll'])

        # add newline to each and every filename (writelines does not
        # add them)
        so_files = ['%s\n' % (i,) for i in so_files]
        print "Found %d DLL PYD files..." % (len(so_files),)
        print "Excluded %d files..." % (len(excluded_files),)

        # open file in specfile_dir, write the whole list
        dll_list_fn = os.path.join(
            config.working_dir, 'dll_list.txt')
        dll_list = file(dll_list_fn, 'w')
        dll_list.writelines(so_files)
        dll_list.close()

        # now run rebase on the list
        os.chdir(config.working_dir)
        ret = os.system(
            '%s -b 0x60000000 -e 0x1000000 @dll_list.txt -v' %
            (REBASE,))

        # rebase returns 99 after rebasing, no idea why.
        if ret != 99:
            raise RuntimeError('Could not rebase DLLs.')
def package_dist():
    """4. package and timestamp distributables (nsis on win, tar on
    posix)

    Extracts the DeVIDE version by running the freshly copied DRE,
    then builds either an NSIS installer (Windows) or a bzip2 tarball
    (posix) named after version and architecture.
    """
    print S_PPF, "package_dist"

    # get devide version (we need this to stamp the executables)
    # NOTE(review): this joins dre_dest with the single string
    # 'dre devide', relying on the embedded space to form the command
    # "<dre_dest>/dre devide -v" -- confirm this is intentional.
    cmd = '%s -v' % (os.path.join(BDIPaths.dre_dest, 'dre devide'),)
    s,o = utils.get_status_output(cmd)

    # s == None if DeVIDE has executed successfully
    if s:
        raise RuntimeError('Could not exec DeVIDE to extract version.')

    mo = re.search('^DeVIDE\s+(v.*)$', o, re.MULTILINE)
    if mo:
        devide_ver = mo.groups()[0]
    else:
        raise RuntimeError('Could not extract DeVIDE version.')

    # now get 32 or 64bit: we're going to use this in the package
    # naming.
    import platform
    if platform.architecture()[0] == '64bit':
        bits_str = '64'
    else:
        bits_str = '32'

    if os.name == 'nt':
        nsi_dir = 'archive\dre\support'
        os.chdir(os.path.join(
            config.working_dir, nsi_dir))

        if config.WINARCH_STR == 'x64':
            # derive an x64 copy of the NSI script
            NSI_FILE = 'devide-re-x64.nsi'
            shutil.copy('devide-re.nsi', NSI_FILE)
            # fix redist for win64
            # also fix installation dir to "program files" and not
            # "program files (x86)"
            utils.re_sub_filter_file(
                [('vcredist_x86', 'vcredist_x64'),
                 ('\$PROGRAMFILES\\\\', '$PROGRAMFILES64\\\\')],
                NSI_FILE)
        else:
            NSI_FILE = 'devide-re.nsi'

        # go to working dir
        os.chdir(config.working_dir)

        # /nocd tells makensis not to change to the directory
        # containing the nsi file.
        cmd = '%s /NOCD archive\dre\support\%s' \
              % (MAKE_NSIS, NSI_FILE)
        ret = os.system(cmd)
        if ret != 0:
            raise RuntimeError('Error running NSIS.')

        # nsis creates devidesetup.exe - we're going to rename to
        # devide-re-v9.8.1234-win64-setup.exe
        platform_str = 'win' + bits_str
        os.rename('devide-re-setup.exe',
                  'devide-re-%s-%s-setup.exe' % \
                  (devide_ver, platform_str))

    else:
        # go to the working dir
        os.chdir(config.working_dir)
        platform_str = 'lin' + bits_str

        # basename will be e.g. devide-re-v9.8.2341-lin64
        basename = '%s-%s-%s' % \
                   (BDIPaths.dre_basename, devide_ver, platform_str)
        tarball = '%s.tar.bz2' % (basename,)

        if os.path.exists(tarball):
            print PPF, '%s exists, not repacking.' % (tarball,)
            return

        print PPF, 'Packing %s' % (tarball,)

        # temporarily rename the tree so the tarball contains the
        # versioned directory name
        os.rename(BDIPaths.dre_basename, basename)

        # create tarball with juicy stuff
        tar = tarfile.open(tarball, 'w:bz2')
        # recursively add directory
        tar.add(basename)
        # finalize
        tar.close()

        # rename devide-version back to distdevide
        os.rename(basename, BDIPaths.dre_basename)

    print PPF, 'DONE.'
def posix_prereq_check():
print S_PPF, 'POSIX prereq check'
# gnu
# have the word version anywhere
v = utils.find_command_with_ver(
'strip',
'%s --version' % (STRIP,),
'([0-9\.]+)')
v = v and utils.find_command_with_ver(
'chrpath',
'%s --version' % (CHRPATH,),
'version\s+([0-9\.]+)')
return v
def windows_prereq_check():
    """Check for rebase.exe, makensis.exe and the MSVC redistributable.

    @return: True only if both tools are found and the matching
    vcredist installer is present in the archive dir.
    """
    print S_PPF, 'WINDOWS prereq check'

    # if you give rebase any other command-line switches (even /?) it
    # exits with return code 99 and outputs its stuff to stderr
    # with -b it exits with return code 0 (expected) and uses stdout
    v = utils.find_command_with_ver(
        'Microsoft Rebase (rebase.exe)',
        '%s -b 0x60000000' % (REBASE,),
        '^(REBASE):\s+Total.*$')

    v = v and utils.find_command_with_ver(
        'Nullsoft Installer System (makensis.exe)',
        '%s /version' % (MAKE_NSIS,),
        '^(v[0-9\.]+)$')

    # check that the user has put the correct vcredist_[].exe in the
    # archive dir
    if os.path.exists(os.path.join(
        config.archive_dir,
        'vcredist_%s.exe' % (config.WINARCH_STR,))):
        vcr = True
    else:
        vcr = False
        print PPF, """vcredist_[x86,x64].exe not found.
Please download vcredist_[x86,x64].exe for VS 2008 SP1 and put it in
the johannes archive directory.
"""

    return v and vcr
def usage():
print "Yo. HELP."
def main():
if len(sys.argv) < 2:
usage()
return
try:
optlist, args = getopt.getopt(
sys.argv[1:], 'w:',
['working-dir='])
except getopt.GetoptError,e:
usage()
return
working_dir = None
for o, a in optlist:
if o in ('-h', '--help'):
usage()
return
if o in ('-w', '--working-dir'):
working_dir = a
if working_dir is None:
usage()
return
config.init(working_dir, the_profile='default')
BDIPaths.dre_basename = 'devide-re'
BDIPaths.dre_dest = os.path.join(
config.working_dir, BDIPaths.dre_basename)
# dependency checking
if os.name == 'nt':
if not windows_prereq_check():
print PPF, "ERR: Windows prerequisites do not check out."
return 1
else:
if not posix_prereq_check():
print PPF, "ERR: POSIX prerequisites do not check out."
return 1
# 1. copy the whole inst dir to 'devide'
copy_inst_to_dre()
# 2. posix: strip / chrpath
# nt: rebase
if os.name == 'nt1':
rebase_dlls()
elif os.name == 'posix':
postproc_sos()
# 3. posix: tar her up
# nt: nsis
package_dist()
# script entry point
if __name__ == '__main__':
    main()
| Python |
# python script for bootstrapping the johannes DeVIDE build system
#
PYVER_STR = '2.7.2'
PYVER_STR2 = '2.7'
import config
import getopt
import os
import shutil
import stat
import sys
import utils
import glob
nt_python = """
@echo off
@rem script to run locally installed johannes python
@rem should be located in johannes wd\jpython.cmd
@rem as it assumes the local install of python is in
@rem wd\inst\python and it's in wd
%~dp0\inst\python\python.exe %1 %2 %3 %4 %5 %6 %7 %8 %9
"""
# I tried with a jython.sh script that sets up the environment and
# then runs the correct python, but even with exports, if that python
# process then restarted another python (with os.system) it would not
# get the modified environment (LD_LIBRARY_PATH), and would hence not
# be able to find its own libraries.
posix_python = """
#!/bin/sh
# script to setup environment for running local python
# this should be sourced before running python
# double-check with 'which python' that the locally installed
# version is running...
MYDIR=%s
export LD_LIBRARY_PATH=$MYDIR/python/lib
export PATH=$MYDIR/python/bin/:$PATH
"""
# script to test for presence of required libs on posix
posix_deps_test_c_file = """
#include <bzlib.h>
#include <sqlite3.h>
#include <ncurses.h>
#include <readline/readline.h>
#include <gtk/gtkversion.h>
#include <ft2build.h>
#include <png.h>
#include <zlib.h>
#include <X11/Intrinsic.h>
#include <GL/glu.h>
#include <openssl/ssl.h>
int main(void) {}
"""
def download_python():
urlbase = 'http://python.org/ftp/python/%s' % (PYVER_STR,)
if os.name == 'posix':
fname = 'Python-%s.tar.bz2' % (PYVER_STR,)
url = '%s/%s' % (urlbase, fname)
elif os.name == 'nt':
if config.WINARCH == '32bit':
fname = 'python-%s.msi' % (PYVER_STR,)
url = '%s/%s' % (urlbase, fname)
else:
fname = 'python-%s.amd64.msi' % (PYVER_STR,)
url = '%s/%s' % (urlbase, fname)
print "##### Bootstrapping with %s Python. #####" % (config.WINARCH,)
utils.goto_archive()
utils.urlget(url)
return fname
def usage():
message = """
Invoke with:
python bootstrap.py -w working_directory
"""
print message
def main():
    """Bootstrap johannes: locally install Python in the working dir.

    On Windows the official MSI is unpacked into inst/python and the
    MSVC 9.0 runtime is copied next to it; on posix the Python source
    is configured and built into inst/python. Finally prints the
    command line with which to start the full johannes build using the
    freshly installed interpreter.
    """
    try:
        optlist, args = getopt.getopt(
            sys.argv[1:], 'w:',
            ['working-dir='])
    except getopt.GetoptError,e:
        usage()
        return

    working_dir = None
    print optlist
    for o, a in optlist:
        if o in ('-w', '--working-dir'):
            working_dir = a

    if not working_dir:
        usage()
        return

    # this will setup the necessary dirs for later calls into utils
    config.init(working_dir, None)

    # first create directory structure
    prepare_dirs(working_dir)

    # now download the python (source for linux, binaries for windows)
    python_fname = download_python()

    if os.name == 'nt':
        # this means we just have to unpack python
        py_msi_dir = os.path.join(config.archive_dir, python_fname)
        py_inst_dir = os.path.join(config.inst_dir, 'python')

        if os.path.exists(py_inst_dir):
            utils.output(
                'Python installation dir present. Skipping install.')
        else:
            utils.output('Doing local installation of Python.')
            # run with basic interface
            # ret is 0 if successful
            ret = os.system(
                'msiexec /a %s TARGETDIR=%s /qb' %
                (py_msi_dir, py_inst_dir))
            if ret != 0:
                utils.error(
                    'Failed locally installing Python. EFS / msiexec problems?')

            # Remove antigravity easter egg, which can be annoying at
            # times
            antigravity = os.path.join(
                py_inst_dir, 'Lib', 'antigravity.py')
            if os.path.exists(antigravity):
                utils.output('Removing antigravity.')
                os.remove(antigravity)

        sxs_manifest_dest = os.path.join(
            py_inst_dir, 'Microsoft.VC90.CRT.manifest')
        if not os.path.exists(sxs_manifest_dest):
            utils.output(
                'Copying Python MSVCRT 9.0 runtime libs.')
            # now copy the frikking VS2008 RTM (i.e. NOT SP1) from the
            # system
            sr = os.environ.get('SYSTEMROOT')
            sxsd = os.path.join(sr, 'WinSxS')
            if config.WINARCH == '64bit':
                astr = 'amd64'
            else:
                astr = 'x86'
            # glob pattern for the VC90 CRT manifest of this
            # architecture
            mbase = '%s_Microsoft.VC90.CRT_1fc8b3b9a1e18e3b_'+ \
                    '9.0.21022.8_*'
            mbase = mbase % (astr,)
            mfn = os.path.join(
                sxsd, 'Manifests',
                '%s.manifest' % (mbase,))
            mfns = glob.glob(mfn)
            if len(mfns) == 0:
                utils.error('No manifest file found that matches %s' % mfn)
            if len(mfns) > 1:
                utils.error('Multiple manifest files found that match %s' % mfn)
            mfn = mfns[0]
            mbase = os.path.splitext(os.path.split(mfn)[-1])[0]
            # copy the manifest file
            shutil.copy(
                mfn, sxs_manifest_dest)
            # now copy the DLLs
            dllsd = os.path.join(sxsd, mbase)
            for dllfn in ['msvcm90.dll', 'msvcp90.dll',
                          'msvcr90.dll']:
                shutil.copy(os.path.join(
                    dllsd, dllfn), os.path.join(
                    py_inst_dir, dllfn))

        # write the jpython.cmd wrapper that runs the locally
        # installed python
        jpcmd = 'jpython.cmd'
        jpc_fn = os.path.join(config.working_dir, jpcmd)
        f = open(jpc_fn, 'w')
        f.write(nt_python)
        f.close()

        ilines = """
%s johannes.py -w %s
""" % (jpc_fn, config.working_dir)

    else:
        if not posix_deps_test_c():
            print """
JOHANNES ##### cc (compiler) or necessary headers not found.
See error above. Please fix and try again.
* See the johannes README.txt for more details on which packages to
install, and also for correct apt-get invocation to install them all
on for example Debian / Ubuntu.
"""
            return

        if not posix_test_cc():
            utils.output('c++ compiler not found.')
            return

        utils.goto_build()
        tbfn = os.path.join(config.archive_dir, python_fname)
        pybasename = 'Python-%s' % (PYVER_STR,)
        build_dir = os.path.join(config.build_dir, pybasename)
        if not os.path.exists(build_dir):
            utils.unpack(tbfn)
        os.chdir(build_dir)

        ret = os.system(
            './configure --enable-shared --prefix=%s/python' %
            (config.inst_dir,))
        if ret != 0:
            utils.error('Python configure error.')

        # config.MAKE contains -j setting
        # I've had this break with Python 2.6.2, so I'm using straight
        # make here...
        ret = os.system('%s install' % ('make',))
        if ret != 0:
            utils.error('Python build error.')

        # we want for example inst/python/lib/python2.7/config/Makefile
        pyconfig_makefile = os.path.join(
            config.inst_dir, 'python', 'lib',
            'python%s' % (PYVER_STR2,), 'config', 'Makefile')
        # it hard-codes the absolute python home in the Makefile, we
        # replace this with something that will work when "dre shell"
        # is active.
        utils.re_sub_filter_file(
            [('^prefix\s*=.*$','prefix = ${DRE_TOP}/python')],
            pyconfig_makefile)

        # write the environment-setup helper script into the working
        # dir (it has to be sourced, not executed)
        sefn = 'jpython_setup_env.sh'
        jpcmd_fn = os.path.join(config.working_dir, sefn)
        f = open(jpcmd_fn, 'w')
        f.write(posix_python % (config.inst_dir,))
        f.close()
        # make it executable
        #os.chmod(jpcmd_fn, stat.S_IEXEC)

        ilines = """
source %s
which python
python johannes.py -w %s
""" % (sefn, config.working_dir)

    print """
######################################################################
Successfully bootstrapped local johannes Python. Start the full build
system with:
%s
""" % (ilines,)
def posix_deps_test_c():
    """Compile a probe program that includes all required dev headers.

    @return: True when compilation succeeds, i.e. the C build
    dependencies appear to be installed.
    """
    utils.goto_build()

    # write out the dependency-probing source file
    src = open('dtest.c', 'w')
    src.write(posix_deps_test_c_file)
    src.close()

    status = os.system(
        'cc -I/usr/include/gtk-2.0 -I/usr/include/freetype2 -o dtest dtest.c')
    # True if successful
    return status == 0
def posix_test_cc():
    """Return True if a working C++ compiler (c++) is available."""
    utils.goto_build()

    # minimal translation unit just to probe the compiler
    src = open('cpptest.cc', 'w')
    src.write('int main(void) {}')
    src.close()

    # True if successful
    return os.system('c++ -o cpptest cpptest.cc') == 0
def prepare_dirs(working_dir):
    """Create the johannes directory layout under working_dir.

    Ensures that working_dir itself and its archive, build and inst
    sub-directories exist. Uses os.makedirs so that missing parent
    directories of working_dir are created too (the previous os.mkdir
    call failed in that case).

    @param working_dir: top-level johannes working directory.
    """
    sub_dirs = [os.path.join(working_dir, sub)
                for sub in ('archive', 'build', 'inst')]
    for d in [working_dir] + sub_dirs:
        if not os.path.exists(d):
            os.makedirs(d)
# script entry point
if __name__ == '__main__':
    main()
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
# for binaries NOT on your PATH, you should specify the complete path here,
# e.g. SVN = '/usr/bin/svn'. For binaries ON your path, only the binary name
# e.g. SVN = 'svn'
SVN = 'svn'
HG = 'hg'
CVS = 'cvs -z3'
GIT = 'git'
PATCH = 'patch'
# only required on Windows
DEVENV = 'devenv'
# on windows, cmake should be on your path, or you should specify the
# full path here. On *ix, you don't have to touch this (johannes
# builds and configures its own cmake)
CMAKE_BINPATH = 'cmake'
# set to True if you want to use distcc on *ix, False otherwise
HAVE_DISTCC = False
# on *ix, use this many parallel make processes
# if you're using distcc, this should be even higher.
NUM_MAKE_PROCESSES = 4
# Set to True if you want to build redistributable DeVIDE binaries
# with PyInstaller as part of the johannes build process. If False,
# you can still run DeVIDE directly from its build directory, and you
# can also create redistributable binaries at a later stage.
BUILD_DEVIDE_DISTRIBUTABLES = False
# nothing for you to edit below this line
#######################################################################
import os
import sys
# this is manually updated by the DeVIDE developers to indicate
# which changeset of DeVIDE this johannes changeset is able to build
DEVIDE_CHANGESET_ID = "5bd1581ebcab"
# this should be the date of the above changeset ID
# and probably the new-style DeVIDE versioning
# so for release: DeVIDE v11.9.16
# for dev: DeVIDE vDEV11.9.16
DEVIDE_DATESTR = "12.4.18"
# contains fixes for dre_top being duplicated, hence
# breaking the pythonhome!
DRE_CHANGESET_ID = "34c8b63b2ac9"
VTKDEVIDE_CHANGESET_ID = "bdc8e1f7e6e6"
BUILD_TARGET = 'RelWithDebInfo'
# will be filled in by init()
JOHANNES_REVISION_ID = "NOT SET"
# the following variables are written by various InstallPackages
####################################################################
# will be written by init()
MAKE = ''
SO_EXT = ''
PYE_EXT = ''
EXE_EXT = ''
WINARCH = ''
WINARCH_STR = ''
# together with CMAKE_BIN_PATH, these will be used by the utils
# modules to build up a cmake command.
CMAKE_DEFAULT_PARAMS = '' # this will be set by init()
CMAKE_PRE_VARS = ''
DCMTK_INCLUDE = ''
DCMTK_LIB = ''
VTK_DIR = ''
VTK_LIB = ''
VTK_SODIR = ''
VTK_PYTHON = ''
ITK_LIB = ''
ITK_BIN = ''
ITK_PYTHON = ''
GDCM_LIB = ''
GDCM_PYTHON = ''
VTKDEVIDE_LIB = ''
VTKDEVIDE_PYTHON = ''
VTKTUDOSS_LIB = ''
VTKTUDOSS_PYTHON =''
WX_LIB_PATH = ''
WXP_PYTHONPATH = ''
ITK_DIR = ''
ITK_BIN = ''
WRAPITK_LIB = ''
WRAPITK_PYTHON = ''
DEVIDE_PY = ''
PYTHON_EXECUTABLE = ''
PYTHON_INCLUDE_PATH = ''
PYTHON_LIBRARY = ''
DEVIDE_INST_DIR = ''
#######################################################################
# UTILITY method (also available in utils.py which we don't want to import)
def get_status_output(command):
    """Run command and return a (status, output) tuple.

    status is the value returned by closing the pipe: None when the
    command succeeded; in general 1 when the command was not found.
    """
    pipe = os.popen(command)
    captured = pipe.read()
    # close() must come after read(); it yields the exit indicator
    return (pipe.close(), captured)
def init(wd, the_profile):
    """Initialise all derived configuration for a johannes run.

    Sets the module-global directory layout (working/archive/build/
    inst), the johannes source dirs, the revision id, build-tool
    settings (MAKE, cmake parameters), platform file extensions and
    the Python build variables.

    @param wd: johannes working directory (made absolute here).
    @param the_profile: profile name, stored in the module-global
    'profile'.
    @raise RuntimeError: if the derived PYTHON_LIBRARY does not exist.
    """
    global working_dir, archive_dir, build_dir, inst_dir
    working_dir = os.path.abspath(wd)
    archive_dir = os.path.join(working_dir, 'archive')
    build_dir = os.path.join(working_dir, 'build')
    inst_dir = os.path.join(working_dir, 'inst')

    # we will also need directory where johannes finds itself, in
    # order to retrieve patches.
    global johannes_dir, patches_dir, ip_dir
    johannes_dir = os.path.dirname(__file__)
    patches_dir = os.path.join(johannes_dir, 'patches')
    ip_dir = os.path.join(johannes_dir, 'install_packages')

    # get revision ID from mercurial
    global JOHANNES_REVISION_ID
    status, output = get_status_output("%s id %s" % (HG, johannes_dir))
    # strip is in case we have single token to get rid of \n
    JOHANNES_REVISION_ID = output.split(' ')[0].strip()

    global profile
    profile = the_profile

    global python_library_path, python_binary_path, python_scripts_path
    python_library_path = os.path.join(inst_dir, 'python', 'lib')
    if os.name == 'nt':
        python_binary_path = os.path.join(inst_dir, 'python')
    else:
        python_binary_path = os.path.join(inst_dir, 'python', 'bin')
    # NOTE(review): 'Scripts' is the Windows Python layout; presumably
    # only meaningful on nt -- confirm against the install packages.
    python_scripts_path = os.path.join(inst_dir, 'python', 'Scripts')

    # platform dependent stuff =========================================
    # use conditionals based on os.name (posix, nt) and sys.platform
    # (linux2, win32)
    global MAKE, DEVENV, CMAKE_DEFAULT_PARAMS, CMAKE_PRE_VARS
    global SO_EXT, PYE_EXT, EXE_EXT
    # FIXME: change convention to x86, amd64, ia64 instead of 32bit
    # and 64bit. Go through all user code to fix.
    global WINARCH, WINARCH_STR
    if os.name == 'posix':
        CMAKE_DEFAULT_PARAMS = '-G "Unix Makefiles"'
        MAKE = 'make -j%d' % (NUM_MAKE_PROCESSES,)
        if HAVE_DISTCC:
            CMAKE_PRE_VARS = 'CC="distcc cc" CXX="distcc c++"'
        else:
            CMAKE_PRE_VARS = ''
        SO_EXT = '.so'
        PYE_EXT = SO_EXT
    elif os.name == 'nt':
        import platform
        a = platform.architecture()[0]
        if a == '32bit':
            CMAKE_DEFAULT_PARAMS = '-G "Visual Studio 9 2008"'
            # where the %s substitution is the SLN file
            # important that devenv is run, and NOT devenv.exe!
            MAKE = DEVENV + ' %s /project %s ' \
                   '/projectconfig "%s|Win32" /build %s'
            WINARCH = '32bit'
            WINARCH_STR = 'x86'
        else:
            CMAKE_DEFAULT_PARAMS = '-G "Visual Studio 9 2008 Win64"'
            # where the %s substitution is the SLN file
            # important that devenv is run, and NOT devenv.exe!
            MAKE = DEVENV + ' %s /project %s ' \
                   '/projectconfig "%s|x64" /build %s'
            WINARCH = '64bit'
            WINARCH_STR = 'x64'
        SO_EXT = '.dll'
        PYE_EXT = '.pyd'
        EXE_EXT = '.exe'

    # now setup some python stuff
    global PYTHON_EXECUTABLE
    global PYTHON_INCLUDE_PATH
    global PYTHON_LIBRARY
    global PYTHON_SITE_PACKAGES
    from distutils import sysconfig
    PYTHON_EXECUTABLE = sys.executable
    PYTHON_INCLUDE_PATH = sysconfig.get_python_inc()
    PYTHON_SITE_PACKAGES = sysconfig.get_python_lib()
    # PYTHON_LIBRARY:
    if os.name == 'posix':
        # under linux, we want the location of libpython2.5.so, under a
        # self-built installation, that's python-inst/lib/libpython2.5.so
        # system installation is /usr/lib/libpython2.5.so
        ldl = sysconfig.get_config_var('LDLIBRARY') # gives the SO name
        ll = os.path.join(sysconfig.get_config_var('prefix'), 'lib')
        PYTHON_LIBRARY = os.path.join(ll, ldl)
    elif os.name == 'nt':
        # under windows, we want Python25\libs\python25.lib (the link
        # stub for the DLL)
        # first derive python25.lib
        ldl = 'python%s%s.lib' % \
              tuple(sysconfig.get_python_version().split('.'))
        # then figure out python25\libs
        ll = os.path.join(sysconfig.get_config_var('prefix'), 'libs')
        PYTHON_LIBRARY = os.path.join(ll, ldl)
    if not os.path.exists(PYTHON_LIBRARY):
        raise RuntimeError(
            '!!!!! %s does not exist (python-dev installed?).' %
            (PYTHON_LIBRARY,))
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import utils
import os
import shutil
import types
class InstallPackage:
    """Base class for everything johannes knows how to install.

    All libraries that should be installed by johannes have to have
    InstallPackage abstractions. This class defines which actions need
    to be taken to get, configure, build and install a complete
    library / software package. The stage methods are no-ops by
    default; subclasses override the ones they need.
    """

    def get(self):
        """Retrieve the package sources (no-op by default)."""
        pass

    def unpack(self):
        """Unpack the retrieved sources (no-op by default)."""
        pass

    def configure(self):
        """Configure the build (no-op by default)."""
        pass

    def build(self):
        """Build the package (no-op by default)."""
        pass

    def install(self):
        """Install the built package (no-op by default)."""
        pass

    def clean_build(self):
        """This method should clean up in such a way that the next build
        of this package will result in AT LEAST all steps from configure
        and onwards. By default, it removes the build dir and calls
        clean_install().
        """
        utils.output("Removing build and installation directories.")
        if os.path.exists(self.build_dir):
            shutil.rmtree(self.build_dir)
        self.clean_install()

    def clean_install(self):
        """ Only cleans up the install directory.
        """
        utils.output("Removing installation directory.")
        if os.path.exists(self.inst_dir):
            shutil.rmtree(self.inst_dir)

    def list(self):
        """ Lists the methods of this install package.
        (Sometimes I forget what the exact names are)
        """
        for attr_name in dir(self):
            if isinstance(getattr(self, attr_name), types.MethodType):
                utils.output(attr_name)
| Python |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
import glob
import os
import re
import sys, urllib
import shutil
import tarfile
import zipfile
import subprocess
def cmake_command(build_dir, source_dir, cmake_params):
    """Invoke correct cmake commands to configure a build directory.

    @param build_dir: out-of-source build directory. method will
    chdir there before invoking cmake
    @param source_dir: location of the source that will be built
    @param cmake_params: string of "-Dparam=blaat -Dparam2=blaat"
    specifying cmake parameters
    @return: exit status of the (last) cmake invocation.
    """
    # assemble the cmake invocation, optionally prefixed with
    # environment assignments (e.g. distcc compiler wrappers)
    cmake = '%s %s' % (config.CMAKE_BINPATH, config.CMAKE_DEFAULT_PARAMS)
    if len(config.CMAKE_PRE_VARS):
        cmake = config.CMAKE_PRE_VARS + ' ' + cmake

    # configure from inside the build directory
    os.chdir(build_dir)
    full_cmd = "%s %s %s" % (cmake, cmake_params, source_dir)
    ret = os.system(full_cmd)

    # on windows, we have to do this a second time (first time
    # configures, second time generates)
    if os.name == 'nt':
        ret = os.system(full_cmd)

    return ret
def copy_glob(src_glob, dst_dir):
    """Copy all files and dirs included by src_glob into the directory
    specified in dst_dir.

    e.g. usage: copy_glob('/etc/*', '/backup/my_etc/')

    @param src_glob: glob pattern selecting files/dirs to copy.
    @param dst_dir: destination directory; created (including any
    missing parent directories) if it does not exist yet.
    @raise RuntimeError: if dst_dir exists but is not a directory.
    """
    if not os.path.exists(dst_dir):
        # makedirs rather than mkdir, so that a dst_dir with missing
        # intermediate directories works too (mkdir used to fail)
        os.makedirs(dst_dir)
    if not os.path.isdir(dst_dir):
        raise RuntimeError('%s is not a directory.' % (dst_dir,))

    for fn in glob.glob(src_glob):
        if os.path.isdir(fn):
            # copytree needs full path in src and dst
            # e.g. copytree('/build/dir/numpy',
            #               'python/lib/site-packages/numpy')
            shutil.copytree(
                fn, os.path.join(dst_dir, os.path.basename(fn)),
                symlinks=True)
        else:
            # shutil is clever enough to take a directory as destination
            shutil.copy(fn, dst_dir)
def find_command_with_ver(name, command, ver_re):
    """Run command and parse its output with the ver_re regular
    expression to extract a version string.  Prints for example:
    CVS: version 2.11 found.
    @return: True if the command ran and a version could be parsed,
    False if the command failed or the version was not found.
    """
    found = False
    status, cmd_output = get_status_output(command)
    if status:
        msg2 = 'NOT FOUND!'
    else:
        match = re.search(ver_re, cmd_output, re.MULTILINE)
        if match is None:
            msg2 = 'could not extract version.'
        else:
            msg2 = 'version %s found.' % (match.groups()[0],)
            found = True
    output("%s: %s" % (name, msg2))
    return found
def find_files(start_dir, re_pattern=r'.*\.(pyd|dll)', exclude_pats=()):
    """Recursively find all files (not directories) with filenames
    matching given regular expression. Case is ignored.

    @param start_dir: search starts in this directory
    @param re_pattern: regular expression with which all found files
    will be matched. example: re_pattern = r'.*\.(pyd|dll)' will match
    all filenames ending in pyd or dll.
    @param exclude_pats: if filename (without directory) matches any
    one of these patterns, it goes to the excluded list instead.
    Default changed from a mutable [] to an immutable () (read-only
    use, behavior identical).
    @return: (found_files, excluded_files) tuple of lists of fully
    qualified filenames that satisfy the pattern.
    """
    cpat = re.compile(re_pattern, re.IGNORECASE)
    found_files = []
    excluded_files = []
    for dirpath, dirnames, filenames in os.walk(start_dir):
        ndirpath = os.path.normpath(os.path.abspath(dirpath))
        for fn in filenames:
            if not cpat.match(fn):
                continue
            # see if fn is disqualified by one of the exclude patterns
            for exclude_pat in exclude_pats:
                if re.match(exclude_pat, fn, re.IGNORECASE):
                    excluded_files.append(os.path.join(ndirpath, fn))
                    break
            else:
                # no exclude pattern matched
                found_files.append(os.path.join(ndirpath, fn))
    return found_files, excluded_files
def get_status_output(command):
    """Run command through the shell and capture its output.

    @return: (status, output) tuple.  In general, status is None for
    success and 1 for command not found, as returned by the pipe
    close.
    """
    pipe = os.popen(command)
    captured = pipe.read()
    return (pipe.close(), captured)
def output(message, rpad=0, rpad_char='#'):
    """Print message with the installer's '#####J>' prefix, optionally
    right-padded with rpad_char up to a total width of rpad characters.
    """
    s = "#####J> %s" % (message,)
    # amount of padding still needed to reach rpad columns
    pn = rpad - len(s)
    if pn < 0:
        pn = 0
    p = pn * rpad_char
    print "%s %s" % (s,p)
    # flush the buffer, else things are out of sync in any log files
    sys.stdout.flush()
def error(message):
    """Signal a fatal installer error by raising RuntimeError with a
    '!!!!!'-prefixed message."""
    formatted = '!!!!! %s' % (message,)
    raise RuntimeError(formatted)
def file_exists(posix_file, nt_file):
    """Platform-specific file existence check: tests posix_file on
    POSIX systems and nt_file everywhere else (i.e. Windows).
    """
    candidate = nt_file
    if os.name == 'posix':
        candidate = posix_file
    return os.path.exists(candidate)
def human_size(num):
    """Method to convert number of bytes to human-readable version.

    Code from http://blogmag.net/blog/read/38/Print_human_readable_file_size

    @param num: size in bytes.
    @return: string such as '3.2MB'.  Sizes of a petabyte and larger
    are expressed in PB; the original fell off the end of the loop and
    silently returned None for those.
    """
    for unit in ['bytes', 'KB', 'MB', 'GB', 'TB']:
        if num < 1024.0:
            return "%3.1f%s" % (num, unit)
        num /= 1024.0
    # fell through all units: express the remainder in petabytes
    return "%3.1f%s" % (num, 'PB')
def make_command(solution_file, install=False, project=None,
                 win_buildtype=None):
    """Install packages can use this method to invoke the
    platform-specific compile command. This can only be run after
    config.init() has run.
    @param solution_file: only used on Windows, ignored on *ix.
    @param install: if true, invokes the make command to install the
    built project.
    @param project: Only build the named project on Windows. This
    overrides the install setting!
    @param win_buildtype: change the buildtype on windows, default
    value is None, which gets translated to the value of
    config.BUILD_TARGET.
    @return: exit status of the build command, as returned by os.system.
    """
    if os.name == 'posix':
        if install:
            make_command = '%s install' % (config.MAKE,)
        else:
            make_command = config.MAKE
    else: # os.name == 'nt'
        # on Windows we build a specific project of the solution:
        # INSTALL (build + install) or ALL_BUILD (build only), unless
        # the caller named an explicit project
        if install:
            prj = 'INSTALL'
        else:
            prj = 'ALL_BUILD'
        if project:
            prj = project
        if win_buildtype:
            buildtype = win_buildtype
        else:
            buildtype = config.BUILD_TARGET
        # config.MAKE is a format string taking solution, project and
        # (twice) the build type
        make_command = config.MAKE % \
            (solution_file, prj, buildtype, buildtype)
    return os.system(make_command)
def urlget(url, output_filename=None):
    """Simple method to retrieve URL. It will get the file in the current
    directory.
    If urlget guesses the wrong download filename based on the URL, pass
    the output_filename parameter.
    FIXME: this does not trap 404 errors. Seems the best way to do this is
    to override FancyURLOpener with a new http_error_default
    @return: the local filename the URL was downloaded to (or already
    existed at).
    """
    def reporthook(blocknum, blocksize, totalsize):
        # progress callback for urlretrieve: rewrite one status line in
        # place (trailing \r) showing percentage and KB downloaded
        current_size = blocknum * blocksize
        current_size_kb = int(current_size / 1024.0)
        sys.stdout.write(
            '% 4.0f %% (%d Kbytes) downloaded\r' %
            (current_size / float(totalsize) * 100.0, current_size_kb))
    if output_filename:
        filename = output_filename
    else:
        # derive the local filename from the last path component of the URL
        i = url.rfind('/')
        filename = url[i+1:]
    print url, "->", filename
    if os.path.exists(filename):
        # keep the previously downloaded copy
        output("%s already present, skipping download." % (filename,))
    else:
        urllib.urlretrieve(url, filename, reporthook)
        sys.stdout.write("\n")
        output("Download complete.")
    return filename
def goto_archive():
    """chdir to the directory where downloaded source archives are kept."""
    os.chdir(config.archive_dir)
def goto_build():
    """chdir to the out-of-source build directory."""
    os.chdir(config.build_dir)
def goto_inst():
    """chdir to the installation directory."""
    os.chdir(config.inst_dir)
def unpack(archive_filename):
    """Unpacks given archive_filename in the current directory. It is
    the caller's responsibility to make sure the current directory is
    the desired destination.
    It's preferable to make use of wrapper methods such as
    unpack_build and unpack_install.
    """
    # archive type is guessed from the filename extension: .bz2 and .gz
    # are treated as tarballs, anything else as a zip file
    tar = None
    zip = None
    if archive_filename.lower().endswith('bz2'):
        m = 'r|bz2'
        tar = tarfile.open(archive_filename, m)
    elif archive_filename.lower().endswith('gz'):
        m = 'r|gz'
        tar = tarfile.open(archive_filename, m)
    else:
        zip = zipfile.ZipFile(archive_filename)
    if tar:
        # extractall is from python 2.5 onwards
        # tar.extractall()
        # we use a form that works on previous versions as well
        for tarinfo in tar:
            print tarinfo.name
            tar.extract(tarinfo)
        tar.close()
    else:
        for zipinfo in zip.infolist():
            # first check if we need to create the directory housing
            # the file
            dn = os.path.dirname(zipinfo.filename)
            if dn and not os.path.isdir(dn):
                os.makedirs(dn)
            # we only extract the file if it's not purely a directory
            if not os.path.isdir(zipinfo.filename):
                print "%s - %s" % (zipinfo.filename, \
                    human_size(zipinfo.file_size))
                # have to write this in binary mode, else we screw up
                # binaries (EXEs and such) quite badly. :)
                f = open(zipinfo.filename, 'wb')
                f.write(zip.read(zipinfo.filename))
                f.close()
        zip.close()
# thin wrappers around unpack() that first chdir to the right directory
def unpack_archive(archive_filename):
    """Unpack given archive_filename in the archive (sources) directory.
    """
    goto_archive()
    unpack(archive_filename)
def unpack_build(archive_filename):
    """Unpack given archive_filename in build directory.
    """
    goto_build()
    unpack(archive_filename)
def unpack_inst(archive_filename):
    """Unpack given archive_filename in installation directory.
    """
    goto_inst()
    unpack(archive_filename)
def re_sub_filter_file(repls, filename):
    """Filter filename line by line through a list of regex replacements.

    @param repls: list of (pattern, replacement) tuples, used as the
    first and second params of re.sub on every line.
    @param filename: file to filter in place.

    A backup of the file is kept in filename.orig while filtering runs;
    both the backup and the temporary .new file are removed on success.
    """
    newfilename = '%s.new' % (filename,)
    origfilename = '%s.orig' % (filename,)
    shutil.copyfile(filename, origfilename)
    # open() instead of the removed file() builtin, and context
    # managers so the handles are closed even if a regex raises
    with open(filename) as ifile:
        with open(newfilename, 'w') as ofile:
            for line in ifile:
                for pattern, repl in repls:
                    line = re.sub(pattern, repl, line)
                ofile.write(line)
    shutil.copyfile(newfilename, filename)
    os.unlink(newfilename)
    os.unlink(origfilename)
def execute_in_vs_environment(post_commands, pre_commands='', communicate=''):
    """ Executes the specified commands as if from the Visual Studio
    command prompt. "vcvarsall.bat" needs to be on the PATH for this.
    post_commands: Commands executed after setting up the environment.
    This should be one string (separate using '&').
    pre_commands: Executed before setting the environment.
    communicate: Command sent to stdin after post_commands.
    @return: exit code of the spawned shell.
    """
    # select the vcvarsall.bat argument for 64 vs 32 bit toolchains
    if config.WINARCH == '64bit':
        astr = 'amd64'
    else:
        astr = 'x86'
    # make sure the pre/post command strings join up with the
    # vcvarsall invocation using the cmd.exe '&' separator
    if pre_commands:
        if pre_commands[-1] != '&':
            pre_commands += '&'
    if post_commands:
        if post_commands[0] != '&':
            post_commands = '&' + post_commands
    p = subprocess.Popen('%s%s %s%s' % (
        pre_commands,
        "vcvarsall.bat",
        astr,
        post_commands),
        shell=True, stdin=subprocess.PIPE)
    if communicate:
        p.communicate(communicate)
    return p.wait()
| Python |
#!/usr/bin/env python
#
# $Id: setup.py,v 1.11 2005/02/15 16:32:22 warnes Exp $
# distutils build script for the SOAPpy package; set CVS=1 to tag the
# version string with the snapshot date
CVS=0
from distutils.core import setup, Command, Extension
from SOAPpy.version import __version__
url="http://pywebsvcs.sf.net/"
long_description="SOAPpy provides tools for building SOAP clients and servers. For more information see " + url
if CVS:
    # CVS snapshot: append the build date to the release version
    import time
    __version__ += "_CVS_" + time.strftime('%Y_%m_%d')
setup(name="SOAPpy",
      version=__version__,
      description="SOAP Services for Python",
      maintainer="Gregory Warnes",
      maintainer_email="Gregory.R.Warnes@Pfizer.com",
      url = url,
      long_description=long_description,
      packages=['SOAPpy','SOAPpy/wstools']
      )
| Python |
#!/usr/bin/env python
# This server validates as of 4/23/01 when run with UserLand's SOAP validator
# (http://validator.soapware.org/).
import getopt
import sys
sys.path.insert (1, '..')
from SOAPpy import SOAP
ident = '$Id: soapware.py,v 1.2 2003/03/08 05:10:01 warnes Exp $'
def whichToolkit ():
    """Identify this SOAP toolkit (user-agent string) to the validator."""
    return SOAP.SOAPUserAgent ()
def countTheEntities (s):
    """Count the five XML-special characters in s.

    Returns a dict with the validator's field names
    (ctLeftAngleBrackets, ctRightAngleBrackets, ctAmpersands,
    ctApostrophes, ctQuotes) mapped to their occurrence counts.
    """
    key_for = {'<': 'ctLeftAngleBrackets', '>': 'ctRightAngleBrackets',
               '&': 'ctAmpersands', "'": 'ctApostrophes', '"': 'ctQuotes'}
    counts = {'ctLeftAngleBrackets': 0, 'ctRightAngleBrackets': 0,
              'ctAmpersands': 0, 'ctApostrophes': 0, 'ctQuotes': 0}
    for ch in s:
        if ch in key_for:
            counts[key_for[ch]] += 1
    return counts
def easyStructTest (stooges):
    """Return the sum of the 'larry', 'moe' and 'curly' members."""
    subtotal = stooges['larry'] + stooges['moe']
    return subtotal + stooges['curly']
def echoStructTest (myStruct):
    """Interop test: echo the received struct back verbatim."""
    return myStruct
def manyTypesTest (num, bool, state, doub, dat, bin):
    """Echo a heterogeneous list, wrapping the values that need an
    explicit SOAP type (boolean, dateTime)."""
    return [num, SOAP.booleanType (bool), state, doub,
        SOAP.dateTimeType (dat), bin]
def moderateSizeArrayCheck (myArray):
    """Combine (add/concatenate) the first and last array elements."""
    first, last = myArray[0], myArray[-1]
    return first + last
def nestedStructTest (myStruct):
    """Sum the stooges struct nested at year2000.month04.day01."""
    return easyStructTest (myStruct.year2000.month04.day01)
def simpleStructReturnTest (myNumber):
    """Return myNumber multiplied by 10, 100 and 1000 as a struct with
    keys times10, times100 and times1000."""
    return dict(('times%d' % factor, myNumber * factor)
                for factor in (10, 100, 1000))
# namespace the whichToolkit method is registered under
namespace = 'http://www.soapware.org/'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8080
def usage (error = None):
    """Print usage (optionally preceded by an error message) to stderr
    and exit."""
    sys.stdout = sys.stderr
    if error != None:
        print error
    print """usage: %s [options]
If a long option shows an argument is mandatory, it's mandatory for the
equivalent short option also. The default (if any) is shown in brackets.
-?, --help display this usage
-h, --host=HOST use HOST in the address to listen on [%s]
-p, --port=PORT listen on PORT [%d]
""" % (sys.argv[0], DEFAULT_HOST, DEFAULT_PORT),
    sys.exit (0)
def main ():
    """Parse command line options and serve the validator methods."""
    host = DEFAULT_HOST
    port = DEFAULT_PORT
    try:
        opts, args = getopt.getopt (sys.argv[1:], '?h:p:',
            ['help', 'host', 'port'])
        for opt, arg in opts:
            if opt in ('-?', '--help'):
                usage ()
            elif opt in ('-h', '--host'):
                host = arg
            elif opt in ('-p', '--port'):
                port = int (arg)
            else:
                raise AttributeError, \
                    "Recognized but unimplemented option `%s'" % opt
    except SystemExit:
        raise
    except:
        # any parse error: show usage along with the error message
        usage (sys.exc_info ()[1])
    server = SOAP.SOAPServer ((host, port))
    # whichToolkit is registered in its own namespace; the other test
    # methods go into the server's default namespace
    server.registerFunction (whichToolkit, namespace)
    server.registerFunction (countTheEntities)
    server.registerFunction (easyStructTest)
    server.registerFunction (echoStructTest)
    server.registerFunction (manyTypesTest)
    server.registerFunction (moderateSizeArrayCheck)
    server.registerFunction (nestedStructTest)
    server.registerFunction (simpleStructReturnTest)
    server.serve_forever()
if __name__ == '__main__':
    # serve until interrupted; Ctrl-C exits with status 0
    try:
        sys.exit (main ())
    except KeyboardInterrupt:
        sys.exit (0)
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
# This is a server for the XMethods matrix
# (http://jake.soapware.org/currentXmethodsResults).
import getopt
import sys
sys.path.insert (1, '..')
from SOAPpy import SOAP
if SOAP.Config.SSLserver:
from M2Crypto import SSL
ident = '$Id: silabserver.py,v 1.2 2003/03/08 05:10:01 warnes Exp $'
def echoFloat (inputFloat):
    """Interop test: echo a float back unchanged."""
    return inputFloat
def echoFloatArray (inputFloatArray):
    """Interop test: echo an array of floats back unchanged."""
    return inputFloatArray
def echoInteger (inputInteger):
    """Interop test: echo an integer back unchanged."""
    return inputInteger
def echoIntegerArray (inputIntegerArray):
    """Interop test: echo an array of integers back unchanged."""
    return inputIntegerArray
def echoString (inputString):
    """Interop test: echo a string back unchanged."""
    return inputString
def echoStringArray (inputStringArray):
    """Interop test: echo an array of strings back unchanged."""
    return inputStringArray
def echoStruct (inputStruct):
    """Interop test: echo a struct back unchanged."""
    return inputStruct
def echoStructArray (inputStructArray):
    """Interop test: echo an array of structs back unchanged."""
    return inputStructArray
def echoVoid ():
    """Interop test: return an empty (void) result."""
    return SOAP.voidType()
def echoDate (inputDate):
    """Interop test: echo a date back as a SOAP dateTime."""
    return SOAP.dateTimeType (inputDate)
def echoBase64 (inputBase64):
    """Interop test: echo binary data back as SOAP base64."""
    return SOAP.binaryType (inputBase64)
# namespace under which all interop methods are served
namespace = 'http://soapinterop.org/'
DEFAULT_HOST = 'localhost'
DEFAULT_HTTP_PORT = 8080
DEFAULT_HTTPS_PORT = 8443
def usage (error = None):
    """Print usage (optionally preceded by an error message) to stderr
    and exit."""
    sys.stdout = sys.stderr
    if error != None:
        print error
    print """usage: %s [options]
If a long option shows an argument is mandatory, it's mandatory for the
equivalent short option also. The default (if any) is shown in brackets.
-?, --help display this usage
-h, --host=HOST use HOST in the address to listen on [%s]
-p, --port=PORT listen on PORT [%d]
""" % (sys.argv[0], DEFAULT_HOST, DEFAULT_HTTP_PORT),
    if SOAP.Config.SSLserver:
        # -s is only offered when M2Crypto SSL support was imported
        print " -s, --ssl serve using SSL"
    sys.exit (0)
def main ():
    """Parse options, then serve the interop echo methods over HTTP or
    (with -s, when M2Crypto is available) HTTPS."""
    host = DEFAULT_HOST
    port = None
    ssl = 0
    try:
        # -s/--ssl is only accepted when SSL support was detected
        opts = '?h:p:'
        args = ['help', 'host', 'port']
        if SOAP.Config.SSLserver:
            opts += 's'
            args += ['ssl']
        opts, args = getopt.getopt (sys.argv[1:], opts, args)
        for opt, arg in opts:
            if opt in ('-?', '--help'):
                usage ()
            elif opt in ('-h', '--host'):
                host = arg
            elif opt in ('-p', '--port'):
                port = int (arg)
            elif opt in ('-s', '--ssl'):
                ssl = 1
            else:
                raise AttributeError, \
                    "Recognized but unimplemented option `%s'" % opt
    except SystemExit:
        raise
    except:
        usage (sys.exc_info ()[1])
    if port == None:
        # default port depends on whether we serve HTTP or HTTPS
        port = [DEFAULT_HTTP_PORT, DEFAULT_HTTPS_PORT][ssl]
    if ssl:
        ssl_context = SSL.Context()
        ssl_context.load_cert('server.pem')
    else:
        ssl_context = None
    server = SOAP.SOAPServer ((host, port), namespace = namespace,
        ssl_context = ssl_context)
    server.registerFunction (echoFloat)
    server.registerFunction (echoFloatArray)
    server.registerFunction (echoInteger)
    server.registerFunction (echoIntegerArray)
    server.registerFunction (echoString)
    server.registerFunction (echoStringArray)
    server.registerFunction (echoStruct)
    server.registerFunction (echoStructArray)
    server.registerFunction (echoVoid)
    server.registerFunction (echoDate)
    server.registerFunction (echoBase64)
    server.serve_forever()
if __name__ == '__main__':
    # serve until interrupted; Ctrl-C exits with status 0
    try:
        sys.exit (main ())
    except KeyboardInterrupt:
        sys.exit (0)
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
# This set of clients validates when run against the servers in
# silab.servers.
import copy
import fileinput
import getopt
import re
import string
import sys
import time
import traceback
sys.path.insert (1, '..')
from SOAPpy import SOAP
# use the 2001 XML Schema (XSD3) namespace for typing; the original
# file repeated this assignment twice -- the redundant duplicate was
# removed (behavior unchanged)
SOAP.Config.typesNamespace = SOAP.NS.XSD3
ident = '$Id: silabclient.py,v 1.2 2003/03/08 05:10:01 warnes Exp $'
# file listing the servers to test, overridable with -s/--servers
DEFAULT_SERVERS_FILE = 'silab.servers'
# all known interop tests, in menu order; each name N maps to a
# module-level function testN (with the first letter capitalized)
DEFAULT_METHODS = \
    (
      'actorShouldPass', 'actorShouldFail',
      'echoDate', 'echoBase64',
      'echoFloat', 'echoFloatArray',
      'echoFloatINF', 'echoFloatNaN',
      'echoFloatNegINF', 'echoFloatNegZero',
      'echoInteger', 'echoIntegerArray',
      'echoString', 'echoStringArray',
      'echoStruct', 'echoStructArray',
      'echoVeryLargeFloat', 'echoVerySmallFloat',
      'echoVoid',
      'mustUnderstandEqualsOne', 'mustUnderstandEqualsZero',
    )
def usage (error = None):
    """Print usage (optionally preceded by an error message) to stderr
    and exit."""
    sys.stdout = sys.stderr
    if error != None:
        print error
    print """usage: %s [options] [server ...]
If a long option shows an argument is mandatory, it's mandatory for the
equivalent short option also.
-?, --help display this usage
-d, --debug turn on debugging in the SOAP library
-e, --exit-on-failure exit on the first (unexpected) failure
-h, --harsh turn on harsh testing:
- look for the documented error code from
mustUnderstand failures
- use non-ASCII strings in the string tests
-i, --invert test servers *not* in the list of servers given
-m, --method=METHOD#[,METHOD#...]
call only the given methods, specify a METHOD# of ?
for the list of method numbers
-n, --no-stats, --no-statistics
don't display success and failure statistics
-N, --no-boring-stats, --no-boring-statistics
only display unexpected failures and unimplemented
tests, and only if non-zero
-o, --output=TYPE turn on output, TYPE is one or more of s(uccess),
f(ailure), n(ot implemented), F(ailed (as expected)),
a(ll)
[f]
-s, --servers=FILE use FILE as list of servers to test [%s]
-t, --stacktrace print a stack trace on each unexpected failure
-T, --always-stacktrace
print a stack trace on any failure
""" % (sys.argv[0], DEFAULT_SERVERS_FILE),
    sys.exit (0)
def methodUsage ():
    """Print the numbered list of test methods (two columns) to stderr
    and exit."""
    sys.stdout = sys.stderr
    print "Methods are specified by number. Multiple methods can be " \
        "specified using a\ncomma-separated list of numbers or ranges. " \
        "For example 1,4-6,8 specifies\nmethods 1, 4, 5, 6, and 8.\n"
    print "The available methods are:\n"
    # two-column layout: method i is printed next to method i + half
    half = (len (DEFAULT_METHODS) + 1) / 2
    for i in range (half):
        print "%4d. %-25s" % (i + 1, DEFAULT_METHODS[i]),
        if i + half < len (DEFAULT_METHODS):
            print "%4d. %-25s" % (i + 1 + half, DEFAULT_METHODS[i + half]),
        print
    sys.exit (0)
# as borrowed from jake.soapware.org for float compares.
def nearlyeq (a, b, prec = 1e-7):
    """True when a and b are equal to within relative precision prec."""
    delta = abs(a - b)
    return delta <= abs(a) * prec
def readServers (file):
    """Parse a servers description file into a list of dicts.

    The file contains blank-line-separated blocks of `tag: value'
    lines; continuation lines start with whitespace.  Special tags:
    `typed' (boolean), `name' (must be unique), `nonfunctional' /
    `functional' (mark a method as expected-to-fail / clear that mark)
    and `like' (inherit the settings of a previously named server).

    @param file: filename (or list of filenames) accepted by fileinput.
    @return: list of server description dicts.
    @raise ValueError: on malformed input.

    BUG FIX: the accepted true values for `typed' used to be
    ('1', 'yes', 'false'), so `typed: true' raised instead of enabling
    typing; corrected to ('1', 'yes', 'true').  Also modernized
    `raise E, msg' and has_key() to forms valid in both Python 2 and 3.
    """
    servers = []
    names = {}
    cur = None
    f = fileinput.input(file)
    for line in f:
        if line[0] == '#':
            continue
        if line == '' or line[0] == '\n':
            # blank line ends the current server block
            cur = None
            continue
        if cur == None:
            cur = {'nonfunctional': {}, '_line': f.filelineno(),
                   '_file': f.filename()}
            tag = None
            servers.append (cur)
        if line[0] in string.whitespace:
            # continuation of the previous tag's value
            if tag == 'nonfunctional':
                value = method + ' ' + cur[tag][method]
            else:
                value = cur[tag]
            value += ' ' + line.strip ()
        elif line[0] == '_':
            raise ValueError(
                "%s, line %d: can't have a tag starting with `_'" %
                (f.filename(), f.filelineno()))
        else:
            tag, value = line.split (':', 1)
            tag = tag.strip ().lower ()
            value = value.strip ()
            if value[0] == '"' and value[-1] == '"':
                value = value[1:-1]
            if tag == 'typed':
                if value.lower() in ('0', 'no', 'false'):
                    value = 0
                elif value.lower() in ('1', 'yes', 'true'):
                    value = 1
                else:
                    raise ValueError(
                        "%s, line %d: unknown typed value `%s'" %
                        (f.filename(), f.filelineno(), value))
            elif tag == 'name':
                if value in names:
                    old = names[value]
                    raise ValueError(
                        "%s, line %d: already saw a server named `%s' "
                        "(on line %d of %s)" %
                        (f.filename(), f.filelineno(), value,
                         old['_line'], old['_file']))
                names[value] = cur
        if tag == 'nonfunctional':
            value = value.split (' ', 1) + ['']
            method = value[0]
            cur[tag][method] = value[1]
        elif tag == 'functional':
            try:
                del cur['nonfunctional'][value]
            except:
                raise ValueError(
                    "%s, line %d: `%s' not marked nonfunctional" %
                    (f.filename(), f.filelineno(), value))
        elif tag == 'like':
            try:
                new = copy.deepcopy(names[value])
            except:
                raise ValueError(
                    "%s, line %d: don't know about a server named `%s'" %
                    (f.filename(), f.filelineno(), value))
            # This is so we don't lose the nonfunctional methods in new or
            # in cur
            new['nonfunctional'].update(cur['nonfunctional'])
            del cur['nonfunctional']
            new.update(cur)
            # This is because servers and possibly names has a reference to
            # cur, so we have to keep working with cur so changes are
            # reflected in servers and names.
            cur.update(new)
        else:
            cur[tag] = value
    return servers
def str2list (s):
    """Expand a comma-separated list of numbers and ranges (e.g.
    '1,4-6,8') into a sorted, de-duplicated list of the individual
    integers."""
    seen = {}
    for part in s.split (','):
        if '-' in part:
            bounds = part.split ('-')
            for n in range (int (bounds[0]), int (bounds[1]) + 1):
                seen[n] = 1
        else:
            seen[int (part)] = 1
    return sorted (seen)
def testActorShouldPass (server, action, harsh):
    """A header for the `next' actor with mustUnderstand=0 must be
    ignored; echoInteger should still succeed."""
    test = 42
    server = server._sa (action % {'methodname': 'echoInteger'})
    hd = SOAP.headerType ()
    hd.InteropTestHeader = SOAP.stringType ("This shouldn't fault because "
        "the mustUnderstand attribute is 0")
    hd.InteropTestHeader._setMustUnderstand (0)
    hd.InteropTestHeader._setActor (
        'http://schemas.xmlsoap.org/soap/actor/next')
    server = server._hd (hd)
    result = server.echoInteger (inputInteger = test)
    if not SOAP.Config.typed:
        result = int (result)
    if result != test:
        raise Exception, "expected %s, got %s" % (test, result)
def testActorShouldFail (server, action, harsh):
    """A header addressed to the `next' actor with mustUnderstand=1
    that the server doesn't understand must produce a fault."""
    test = 42
    server = server._sa (action % {'methodname': 'echoInteger'})
    hd = SOAP.headerType ()
    hd.InteropTestHeader = SOAP.stringType ("This should fault because "
        "the mustUnderstand attribute is 1")
    hd.InteropTestHeader._setMustUnderstand (1)
    hd.InteropTestHeader._setActor (
        'http://schemas.xmlsoap.org/soap/actor/next')
    server = server._hd (hd)
    try:
        result = server.echoInteger (inputInteger = test)
    except SOAP.faultType, e:
        # in harsh mode, insist on the documented fault code
        if harsh and e.faultcode != 'SOAP-ENV:MustUnderstand':
            raise AttributeError, "unexpected faultcode %s" % e.faultcode
        return
    raise Exception, "should fail, succeeded with %s" % result
def testEchoFloat (server, action, harsh):
    """Round-trip several ordinary float values through echoFloat."""
    server = server._sa (action % {'methodname': 'echoFloat'})
    for test in (0.0, 1.0, -1.0, 3853.33333333):
        result = server.echoFloat (inputFloat = test)
        if not SOAP.Config.typed:
            result = float (result)
        if not nearlyeq (result, test):
            raise Exception, "expected %.8f, got %.8f" % (test, result)
def testEchoFloatArray (server, action, harsh):
    """Round-trip an array of floats through echoFloatArray."""
    test = [0.0, 1.0, -1.0, 3853.33333333]
    server = server._sa (action % {'methodname': 'echoFloatArray'})
    result = server.echoFloatArray (inputFloatArray = test)
    for i in range (len (test)):
        if not SOAP.Config.typed:
            result[i] = float (result[i])
        if not nearlyeq (result[i], test[i]):
            raise Exception, "@ %d expected %s, got %s" % \
                (i, repr (test), repr (result))
def testEchoFloatINF (server, action, harsh):
    """Round-trip positive infinity through echoFloat."""
    try:
        test = float ('INF')
    except:
        # platforms whose float() can't parse 'INF': overflow instead
        test = float (1e300**2)
    server = server._sa (action % {'methodname': 'echoFloat'})
    result = server.echoFloat (inputFloat = test)
    if not SOAP.Config.typed:
        result = float (result)
    if result != test:
        raise Exception, "expected %.8f, got %.8f" % (test, result)
def testEchoFloatNaN (server, action, harsh):
    """Round-trip NaN through echoFloat.
    NOTE(review): if float('NaN') produces a true IEEE NaN, the
    `result != test' comparison below is always true -- verify how
    this is expected to pass on such platforms."""
    try:
        test = float ('NaN')
    except:
        test = float (0.0)
    server = server._sa (action % {'methodname': 'echoFloat'})
    result = server.echoFloat (inputFloat = test)
    if not SOAP.Config.typed:
        result = float (result)
    if result != test:
        raise Exception, "expected %.8f, got %.8f" % (test, result)
def testEchoFloatNegINF (server, action, harsh):
    """Round-trip negative infinity through echoFloat."""
    try:
        test = float ('-INF')
    except:
        test = float (-1e300**2)
    server = server._sa (action % {'methodname': 'echoFloat'})
    result = server.echoFloat (inputFloat = test)
    if not SOAP.Config.typed:
        result = float (result)
    if result != test:
        raise Exception, "expected %.8f, got %.8f" % (test, result)
def testEchoFloatNegZero (server, action, harsh):
    """Round-trip negative zero through echoFloat."""
    test = float ('-0.0')
    server = server._sa (action % {'methodname': 'echoFloat'})
    result = server.echoFloat (inputFloat = test)
    if not SOAP.Config.typed:
        result = float (result)
    if result != test:
        raise Exception, "expected %.8f, got %.8f" % (test, result)
def testEchoInteger (server, action, harsh):
    """Round-trip several integers through echoInteger."""
    server = server._sa (action % {'methodname': 'echoInteger'})
    for test in (0, 1, -1, 3853):
        result = server.echoInteger (inputInteger = test)
        if not SOAP.Config.typed:
            result = int (result)
        if result != test:
            # NOTE(review): %.8f formats these integers as floats in
            # the error message; %s would read better
            raise Exception, "expected %.8f, got %.8f" % (test, result)
def testEchoIntegerArray (server, action, harsh):
    """Round-trip an array of integers through echoIntegerArray."""
    test = [0, 1, -1, 3853]
    server = server._sa (action % {'methodname': 'echoIntegerArray'})
    result = server.echoIntegerArray (inputIntegerArray = test)
    for i in range (len (test)):
        if not SOAP.Config.typed:
            result[i] = int (result[i])
        if result[i] != test[i]:
            raise Exception, "@ %d expected %s, got %s" % \
                (i, repr (test), repr (result))
# string round-trip payloads used by the non-harsh tests; an earlier
# assignment that also included the empty string was dead code (it was
# immediately overwritten by this one) and has been removed
relaxedStringTests = ['Hello', '\'<&>"',]
# harsh mode additionally exercises the empty string and non-ASCII
# unicode characters, alone and mixed with the XML-special characters
harshStringTests = ['', 'Hello', '\'<&>"',
    u'\u0041', u'\u00a2', u'\u0141', u'\u2342',
    u'\'<\u0041&>"', u'\'<\u00a2&>"', u'\'<\u0141&>"', u'\'<\u2342&>"',]
def testEchoString (server, action, harsh):
    """Round-trip each test string through echoString."""
    if harsh:
        test = harshStringTests
    else:
        test = relaxedStringTests
    server = server._sa (action % {'methodname': 'echoString'})
    for test in test:
        result = server.echoString (inputString = test)
        if result != test:
            raise Exception, "expected %s, got %s" % \
                (repr (test), repr (result))
def testEchoStringArray (server, action, harsh):
    """Round-trip the whole list of test strings through echoStringArray."""
    if harsh:
        test = harshStringTests
    else:
        test = relaxedStringTests
    server = server._sa (action % {'methodname': 'echoStringArray'})
    result = server.echoStringArray (inputStringArray = test)
    if result != test:
        raise Exception, "expected %s, got %s" % (repr (test), repr (result))
def testEchoStruct (server, action, harsh):
test = {'varFloat': 2.256, 'varInt': 474, 'varString': 'Utah'}
server = server._sa (action % {'methodname': 'echoStruct'})
result = server.echoStruct (inputStruct = test)
if not SOAP.Config.typed:
result.varFloat = float (result.varFloat)
result.varInt = int (result.varInt)
if not nearlyeq (test['varFloat'], result.varFloat):
raise Exception, ".varFloat expected %s, got %s" % \
(i, repr (test['varFloat']), repr (result.varFloat))
for i in test.keys ():
if i == 'varFloat':
continue
if test[i] != getattr (result, i):
raise Exception, ".%s expected %s, got %s" % \
(i, repr (test[i]), repr (getattr (result, i)))
def testEchoStructArray (server, action, harsh):
    """Round-trip an array of structs, verifying each member of each
    element (varFloat via approximate comparison)."""
    test = [{'varFloat': -5.398, 'varInt': -546, 'varString': 'West Virginia'},
        {'varFloat': -9.351, 'varInt': -641, 'varString': 'New Mexico'},
        {'varFloat': 1.495, 'varInt': -819, 'varString': 'Missouri'}]
    server = server._sa (action % {'methodname': 'echoStructArray'})
    result = server.echoStructArray (inputStructArray = test)
    for s in range (len (test)):
        if not SOAP.Config.typed:
            result[s].varFloat = float (result[s].varFloat)
            result[s].varInt = int (result[s].varInt)
        if not nearlyeq (test[s]['varFloat'], result[s].varFloat):
            raise Exception, \
                "@ %d.varFloat expected %s, got %s" % \
                (s, repr (test[s]['varFloat']), repr (result[s].varFloat))
        for i in test[s].keys ():
            if i == 'varFloat':
                continue
            if test[s][i] != getattr (result[s], i):
                raise Exception, "@ %d.%s expected %s, got %s" % \
                    (s, i, repr (test[s][i]), repr (getattr (result[s], i)))
def testEchoVeryLargeFloat (server, action, harsh):
    """Round-trip a very large (1e29 magnitude) float through echoFloat."""
    test = 2.2535e29
    server = server._sa (action % {'methodname': 'echoFloat'})
    result = server.echoFloat (inputFloat = test)
    if not SOAP.Config.typed:
        result = float (result)
    if not nearlyeq (result, test):
        raise Exception, "expected %s, got %s" % (repr (test), repr (result))
def testEchoVerySmallFloat (server, action, harsh):
test = 2.2535e29
server = server._sa (action % {'methodname': 'echoFloat'})
result = server.echoFloat (inputFloat = test)
if not SOAP.Config.typed:
result = float (result)
if not nearlyeq (result, test):
raise Exception, "expected %s, got %s" % (repr (test), repr (result))
def testEchoVoid (server, action, harsh):
    """echoVoid must return a structure with no public members."""
    server = server._sa (action % {'methodname': 'echoVoid'})
    result = server.echoVoid ()
    for k in result.__dict__.keys ():
        # underscore-prefixed attributes are SOAPpy internals
        if k[0] != '_':
            raise Exception, "expected an empty structType, got %s" % \
                repr (result.__dict__)
def testMustUnderstandEqualsOne (server, action, harsh):
    """An un-understood header with mustUnderstand=1 must fault."""
    test = 42
    server = server._sa (action % {'methodname': 'echoInteger'})
    hd = SOAP.headerType ()
    hd.MustUnderstandThis = SOAP.stringType ("This should fault because "
        "the mustUnderstand attribute is 1")
    hd.MustUnderstandThis._setMustUnderstand (1)
    server = server._hd (hd)
    try:
        result = server.echoInteger (inputInteger = test)
    except SOAP.faultType, e:
        # in harsh mode, insist on the documented fault code
        if harsh and e.faultcode != 'SOAP-ENV:MustUnderstand':
            raise AttributeError, "unexpected faultcode %s" % e.faultcode
        return
    raise Exception, "should fail, succeeded with %s" % result
def testMustUnderstandEqualsZero (server, action, harsh):
    """A header with mustUnderstand=0 must be ignored; the call
    succeeds normally."""
    test = 42
    server = server._sa (action % {'methodname': 'echoInteger'})
    hd = SOAP.headerType ()
    hd.MustUnderstandThis = SOAP.stringType ("This shouldn't fault because "
        "the mustUnderstand attribute is 0")
    hd.MustUnderstandThis._setMustUnderstand (0)
    server = server._hd (hd)
    result = server.echoInteger (inputInteger = test)
    if not SOAP.Config.typed:
        result = int (result)
    if result != test:
        raise Exception, "expected %s, got %s" % (test, result)
def testEchoDate (server, action, harsh):
    """Round-trip the current UTC time through echoDate."""
    test = time.gmtime (time.time ())
    server = server._sa (action % {'methodname': 'echoDate'})
    if SOAP.Config.namespaceStyle == '1999':
        # the 1999 schema names the type timeInstant, not dateTime
        result = server.echoDate (inputDate = SOAP.timeInstantType (test))
    else:
        result = server.echoDate (inputDate = SOAP.dateTimeType (test))
    if not SOAP.Config.typed and type (result) in (type (''), type (u'')):
        # untyped servers hand the date back as a string: parse it
        p = SOAP.SOAPParser()
        result = p.convertDateTime(result, 'timeInstant')
    if result != test[:6]:
        raise Exception, "expected %s, got %s" % (repr (test), repr (result))
def testEchoBase64 (server, action, harsh):
    """Round-trip 16 bytes of binary data through echoBase64."""
    test = '\x00\x10\x20\x30\x40\x50\x60\x70\x80\x90\xa0\xb0\xc0\xd0\xe0\xf0'
    server = server._sa (action % {'methodname': 'echoBase64'})
    result = server.echoBase64 (inputBase64 = SOAP.base64Type (test))
    if not SOAP.Config.typed:
        import base64
        result = base64.decodestring(result)
    if result != test:
        raise Exception, "expected %s, got %s" % (repr (test), repr (result))
def main ():
    """Parse options, run the selected interop tests against each
    configured server and print statistics.  Returns the number of
    unexpected failures plus unimplemented tests."""
    stats = 1
    total = 0
    fail = 0
    failok = 0
    succeed = 0
    exitonfailure = 0
    harsh = 0
    invert = 0
    printtrace = 0
    methodnums = None
    notimp = 0
    output = 'f'
    servers = DEFAULT_SERVERS_FILE
    started = time.time ()
    try:
        opts, args = getopt.getopt (sys.argv[1:], '?dehim:nNo:s:tT',
            ['help', 'debug', 'exit-on-failure', 'harsh', 'invert',
             'method', 'no-stats', 'no-statistics',
             'no-boring-statistics', 'no-boring-stats', 'output',
             'servers=', 'stacktrace', 'always-stacktrace'])
        for opt, arg in opts:
            if opt in ('-?', '--help'):
                usage ()
            elif opt in ('-d', '--debug'):
                SOAP.Config.debug = 1
            elif opt in ('-h', '--harsh'):
                harsh = 1
            elif opt in ('-i', '--invert'):
                invert = 1
            elif opt in ('-e', '--exit-on-failure'):
                exitonfailure = 1
            elif opt in ('-m', '--method'):
                if arg == '?':
                    methodUsage ()
                methodnums = str2list (arg)
            elif opt in ('-n', '--no-stats', '--no-statistics'):
                stats = 0
            elif opt in ('-N', '--no-boring-stats', '--no-boring-statistics'):
                stats = -1
            elif opt in ('-o', '--output'):
                output = arg
            elif opt in ('-s', '--servers'):
                servers = arg
            elif opt in ('-t', '--stacktrace'):
                printtrace = 1
            elif opt in ('-T', '--always-stacktrace'):
                printtrace = 2
            else:
                raise AttributeError, \
                    "Recognized but unimplemented option `%s'" % opt
    except SystemExit:
        raise
    except:
        usage (sys.exc_info ()[1])
    if 'a' in output:
        # 'a'(ll) is shorthand for every output category
        output = 'fFns'
    servers = readServers (servers)
    if methodnums == None:
        # no -m given: run every known method
        methodnums = range (1, len (DEFAULT_METHODS) + 1)
    # positional args select (or with -i deselect) servers by name
    limitre = re.compile ('|'.join (args), re.IGNORECASE)
    for s in servers:
        if (not not limitre.match (s['name'])) == invert:
            continue
        try: typed = s['typed']
        except: typed = 1
        try: style = s['style']
        except: style = 1999
        SOAP.Config.typed = typed
        SOAP.Config.namespaceStyle = style
        server = SOAP.SOAPProxy (s['endpoint'], ("m", s['namespace']))
        for num in (methodnums):
            if num > len (DEFAULT_METHODS):
                break
            total += 1
            name = DEFAULT_METHODS[num - 1]
            title = '%s: %s (#%d)' % (s['name'], name, num)
            if SOAP.Config.debug:
                print "%s:" % title
            try:
                # look up the matching module-level testXxx function
                fn = globals ()['test' + name[0].upper () + name[1:]]
            except KeyboardInterrupt:
                raise
            except:
                if 'n' in output:
                    print title, "test not yet implemented"
                notimp += 1
                continue
            try:
                fn (server, s['soapaction'], harsh)
                if s['nonfunctional'].has_key (name):
                    print title, \
                        "succeeded despite being marked nonfunctional"
                if 's' in output:
                    print title, "succeeded"
                succeed += 1
            except KeyboardInterrupt:
                raise
            except:
                fault = str (sys.exc_info ()[1])
                if fault[-1] == '\n':
                    fault = fault[:-1]
                if s['nonfunctional'].has_key (name):
                    # failure was expected for this method
                    if 'F' in output:
                        t = 'as expected'
                        if s['nonfunctional'][name] != '':
                            t += ', ' + s['nonfunctional'][name]
                        print title, "failed (%s) -" % t, fault
                    if printtrace > 1:
                        traceback.print_exc ()
                    failok += 1
                else:
                    if 'f' in output:
                        print title, "failed -", fault
                    if printtrace:
                        traceback.print_exc ()
                    fail += 1
                    if exitonfailure:
                        return -1
    if stats:
        print " Tests started at:", time.ctime (started)
        if stats > 0:
            print " Total tests: %d" % total
            print " Successes: %d (%3.2f%%)" % \
                (succeed, 100.0 * succeed / total)
        if stats > 0 or fail > 0:
            print "Failed unexpectedly: %d (%3.2f%%)" % \
                (fail, 100.0 * fail / total)
        if stats > 0:
            print " Failed as expected: %d (%3.2f%%)" % \
                (failok, 100.0 * failok / total)
        if stats > 0 or notimp > 0:
            print " Not implemented: %d (%3.2f%%)" % \
                (notimp, 100.0 * notimp / total)
    return fail + notimp
if __name__ == '__main__':
    # run until finished or interrupted; Ctrl-C exits with status 0
    try:
        sys.exit (main ())
    except KeyboardInterrupt:
        sys.exit (0)
| Python |
#!/usr/bin/env python
# Copyright (c) 2001, actzero, inc.
import sys
sys.path.insert(1,"..")
from SOAPpy import SOAP
#SOAP.Config.debug = 1
# Identifies this server implementation in receipts returned to clients.
serverstring = "SOAP.py (actzero.com) running "+sys.platform
# Per-method call counters, reported by Monitor() and reset by Clear().
NUMBUYS = 0
NUMSIMPLEBUYS = 0
NUMREQUESTS = 0
NUMPINGS = 0
def SimpleBuy(Address, ProductName, Quantity):
# currently, this type-checks the params, and makes sure
# the strings are of len > 0
global NUMSIMPLEBUYS
NUMSIMPLEBUYS += 1
if Quantity < 1: raise ValueError, "must order at least one"
else: return "Receipt for %d %s(s) bought from %s" % (int(Quantity), ProductName, serverstring)
def RequestForQuote(ProductName, Quantity):
# type-checks and makes sure Quantity >= 1
global NUMREQUESTS
NUMREQUESTS += 1
if Quantity < 1: raise ValueError, "must order at least 1"
else:
import whrandom
mult = whrandom.random()
times = 0
while mult > 0.25:
mult = mult - 0.25
times += 1
mult += 0.5
mult = round(mult, 3)
print mult, times
return SOAP.doubleType(round(mult*int(Quantity),2))
def Buy(**kw):
    """SOAP interop Buy: validate a PurchaseOrder struct, return a receipt.

    The order may arrive under keyword 'PurchaseOrder' or 'PO'
    (different SOAP toolkits use different names).
    """
    global NUMBUYS
    NUMBUYS += 1
    try:
        PurchaseOrder = kw["PurchaseOrder"]
    except:
        # Fall back to the short keyword used by several other toolkits.
        PurchaseOrder = kw["PO"]
    try:
        POkeys = PurchaseOrder['_keyord']
        POkeys.sort()
        POkeys_expected = ["shipTo","billTo","items","poID","createDate"]
        POkeys_expected.sort()
        if POkeys != POkeys_expected:
            # NOTE(review): this ValueError is caught by the bare except
            # below and re-raised as the generic TypeError, so its
            # detailed message never reaches the caller.
            raise ValueError, "struct 'PurchaseOrder' needs %s, %s, %s, %s, and %s" % tuple(POkeys_expected)
    except:
        raise TypeError, "'PurchaseOrder' missing one or more element(s)"
    try:
        btkeys = PurchaseOrder["billTo"]["_keyord"]
        btkeys.sort()
        # NOTE(review): btkeys is never compared to btkeys_expected; only
        # the presence of billTo/_keyord is effectively checked here.
        btkeys_expected = ["address","zipCode","name","state","city"]
        btkeys_expected.sort()
    except:
        raise TypeError, "'billTo' missing one or more elements"
    try:
        stkeys = PurchaseOrder["shipTo"]["_keyord"]
        stkeys.sort()
        # NOTE(review): same as billTo -- sorted keys are not compared.
        stkeys_expected = ["address","zipCode","name","state","city"]
        stkeys_expected.sort()
    except:
        raise TypeError, "'shipTo' missing one or more elements"
    try:
        # Build "bought N item(s) for P, ..." from the line-item array.
        items = PurchaseOrder["items"].__dict__
        data = items["data"]
        retstring = ""
        for item in data:
            itemdict = item["_asdict"]
            q = itemdict["quantity"]
            p = itemdict["price"]
            name = itemdict["name"]
            if retstring != "":
                retstring += ", "
            else:
                retstring = "bought "
            retstring += "%d %s(s) for %.2f" % (q,name,p)
        retstring += " from "+serverstring
        return retstring
    except:
        raise TypeError, "items must be an array of 'item' structs"
def Ping():
    """No-op liveness probe; only bumps the call counter."""
    global NUMPINGS
    NUMPINGS = NUMPINGS + 1
    return
def Monitor(str):
if str=="actzero":
global NUMBUYS
global NUMREQUESTS
global NUMSIMPLEBUYS
global NUMPINGS
return "(Buys, RequestForQuote(s),SimpleBuy(s), Ping(s)) = " + \
repr( (NUMBUYS,NUMREQUESTS,NUMSIMPLEBUYS, NUMPINGS) )
else:
raise ValueError, "not the right string"
def Clear(str):
if str=="actzero":
global NUMBUYS
global NUMREQUESTS
global NUMSIMPLEBUYS
global NUMPINGS
NUMBUYS = 0
NUMREQUESTS = 0
NUMSIMPLEBUYS = 0
NUMPINGS = 0
return "(Buys, RequestForQuote(s),SimpleBuy(s), Ping(s)) = " + \
repr( (NUMBUYS,NUMREQUESTS,NUMSIMPLEBUYS, NUMPINGS) )
else:
raise ValueError, "not the right string"
if __name__ == "__main__":
    # Optional single argument: TCP port to listen on (default 9000).
    if len(sys.argv) > 1:
        try:
            port = int(sys.argv[1])
            if port not in range(2000,15000): raise ValueError
        except:
            print "port must be a number between 2000 and 15000"
            sys.exit(1)
    else: port = 9000
    namespace = "http://www.soapinterop.org/Bid"
    # NOTE(review): binds to host 'zoo', not 'localhost' -- looks like a
    # machine-specific leftover; confirm before running elsewhere.
    server = SOAP.SOAPServer( ('zoo',port) )
    # Expose the interop methods plus the monitoring helpers.
    server.registerKWFunction(SimpleBuy, namespace )
    server.registerKWFunction(RequestForQuote, namespace )
    server.registerKWFunction(Buy, namespace )
    server.registerKWFunction(Ping, namespace )
    server.registerKWFunction(Monitor, namespace )
    server.registerKWFunction(Clear, namespace )
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
| Python |
#!/usr/bin/env python
import getopt
import sys
import string
import re
import time
sys.path.insert(1,"..")
from SOAPpy import SOAP
import traceback
# Default server inventory file and the interop methods this driver runs.
DEFAULT_SERVERS_FILE = './inventory.servers'
DEFAULT_METHODS = ('SimpleBuy', 'RequestForQuote','Buy','Ping')
def usage (error = None):
    """Print usage (optionally preceded by an error) on stderr and exit 0."""
    sys.stdout = sys.stderr
    if error != None:
        print error
    print """usage: %s [options] [server ...]
If a long option shows an argument is mandatory, it's mandatory for the
equivalent short option also.
  -?, --help            display this usage
  -d, --debug           turn on debugging in the SOAP library
  -i, --invert          test servers *not* in the list of servers given
  -m, --method=METHOD#[,METHOD#...]
                        call only the given methods, specify a METHOD# of ?
                        for the list of method numbers
  -o, --output=TYPE     turn on output, TYPE is one or more of s(uccess),
                        f(ailure), n(ot implemented), F(ailed (as expected)),
                        a(ll)
                        [f]
  -s, --servers=FILE    use FILE as list of servers to test [%s]
  -t, --stacktrace      print a stack trace on each unexpected failure
  -T, --always-stacktrace
                        print a stack trace on any failure
""" % (sys.argv[0], DEFAULT_SERVERS_FILE),
    sys.exit (0)
def methodUsage ():
sys.stdout = sys.stderr
print "Methods are specified by number. Multiple methods can be " \
"specified using a\ncomma-separated list of numbers or ranges. " \
"For example 1,4-6,8 specifies\nmethods 1, 4, 5, 6, and 8.\n"
print "The available methods are:\n"
half = (len (DEFAULT_METHODS) + 1) / 2
for i in range (half):
print "%4d. %-25s" % (i + 1, DEFAULT_METHODS[i]),
if i + half < len (DEFAULT_METHODS):
print "%4d. %-25s" % (i + 1 + half, DEFAULT_METHODS[i + half]),
print
sys.exit (0)
def readServers (file):
    """Parse a server-inventory file into a list of dicts.

    The file holds blank-line-separated records of 'tag: value' lines;
    lines starting with whitespace continue the previous tag, and a
    'nonfunctional' tag maps a method name to an explanatory note.
    """
    servers = []
    f = open (file, 'r')
    while 1:
        line = f.readline ()
        if line == '':
            break
        # Skip comments and blank/indented junk between records.
        if line[0] in ('#', '\n') or line[0] in string.whitespace:
            continue
        cur = {'nonfunctional': {}}
        tag = None
        servers.append (cur)
        while 1:
            if line[0] in string.whitespace:
                # Continuation line: extend the previous tag's value
                # (for 'nonfunctional', re-join method and note first).
                if tag == 'nonfunctional':
                    value = method + ' ' + cur[tag][method]
                else:
                    value = cur[tag]
                value += ' ' + line.strip ()
            else:
                tag, value = line.split (':', 1)
                tag = tag.strip ().lower ()
                value = value.strip ()
                # Strip one level of surrounding double quotes.
                if value[0] == '"' and value[-1] == '"':
                    value = value[1:-1]
            if tag == 'nonfunctional':
                # 'nonfunctional METHOD [note]' -> cur['nonfunctional'][METHOD] = note
                value = value.split (' ', 1) + ['']
                method = value[0]
                cur[tag][method] = value[1]
            else:
                cur[tag] = value
            line = f.readline ()
            if line == '' or line[0] == '\n':
                break
    return servers
def str2list (s):
    """Expand a method-number spec like '1,4-6,8' into a sorted list.

    Accepts a comma-separated mix of single integers and LO-HI ranges
    (inclusive); duplicates collapse.  Returns the numbers ascending.

    Improvements over the original: the loop variable is no longer reused
    for three different things, and sorted() replaces the
    'l = l.keys(); l.sort()' sequence that breaks on Python 3 dict views.
    """
    seen = {}
    for part in s.split (','):
        if part.find ('-') != -1:
            bounds = part.split ('-')
            for n in range (int (bounds[0]), int (bounds[1]) + 1):
                seen[n] = 1
        else:
            seen[int (part)] = 1
    return sorted (seen.keys ())
def SimpleBuy(serv, sa, epname):
    """Invoke SimpleBuy on the proxy with a fixed widget order."""
    # JHawk and Phalanx servers require exactly this keyword order.
    proxy = serv._sa (sa % {'methodname':'SimpleBuy'})
    return proxy.SimpleBuy(ProductName="widget", Quantity = 50, Address = "this is my address")
def RequestForQuote(serv, sa, epname):
    """Invoke RequestForQuote on the proxy with a fixed 3-unit order."""
    proxy = serv._sa (sa % {'methodname':'RequestForQuote'})
    # Phalanx and JHawk want this keyword order.
    return proxy.RequestForQuote(Quantity=3, ProductName = "thing")
def Buy(serv, sa, epname):
    """Invoke Buy with a canned two-line-item purchase order."""
    import copy
    # NOTE(review): `copy` is imported but never used.
    serv = serv._sa (sa % {'methodname':'Buy'})
    billTo_d = {"name":"Buyer One", "address":"1 1st Street",
                "city":"New York", "state":"NY", "zipCode":"10000"}
    shipTo_d = {"name":"Buyer One ", "address":"1 1st Street ",
                "city":"New York ", "state":"NY ", "zipCode":"10000 "}
    # Each shipTo value carries a trailing space that is stripped here,
    # yielding the same text as billTo but as distinct string objects.
    for k,v in shipTo_d.items():
        shipTo_d[k] = v[:-1]
    itemd1 = SOAP.structType( {"name":"widg1","quantity":200,"price":SOAP.decimalType(45.99), "_typename":"LineItem"})
    itemd2 = SOAP.structType( {"name":"widg2","quantity":400,"price":SOAP.decimalType(33.45), "_typename":"LineItem"})
    items_d = SOAP.arrayType( [itemd1, itemd2] )
    items_d._ns = "http://www.soapinterop.org/Bid"
    po_d = SOAP.structType( data = {"poID":"myord","createDate":SOAP.dateTimeType(),"shipTo":shipTo_d, "billTo":billTo_d, "items":items_d})
    try:
        # it's called PO by MST (MS SOAP Toolkit), JHawk (.NET Remoting),
        # Idoox WASP, Paul (SOAP::Lite), PranishK (ATL), GLUE, Aumsoft,
        # HP, EasySoap, and Jake (Frontier). [Actzero accepts either]
        return serv.Buy(PO=po_d)
    except:
        # called PurchaseOrder by KeithBa
        return serv.Buy(PurchaseOrder=po_d)
def Ping(serv, sa, epname):
    """Invoke the argument-less Ping method on the proxy."""
    proxy = serv._sa (sa % {'methodname':'Ping'})
    return proxy.Ping()
def main():
servers = DEFAULT_SERVERS_FILE
methodnums = None
output = 'f'
invert = 0
succeed = 0
printtrace = 0
stats = 1
total = 0
fail = 0
failok = 0
notimp = 0
try:
opts,args = getopt.getopt (sys.argv[1:], '?dm:io:s:t',
['help', 'method', 'debug', 'invert',
'output', 'servers='])
for opt, arg in opts:
if opt in ('-?', '--help'):
usage ()
elif opt in ('-d', '--debug'):
SOAP.Config.debug = 1
elif opt in ('-i', '--invert'):
invert = 1
elif opt in ('-m', '--method'):
if arg == '?':
methodUsage ()
methodnums = str2list (arg)
elif opt in ('-o', '--output'):
output = arg
elif opt in ('-s', '--servers'):
servers = arg
else:
raise AttributeError, \
"Recognized but unimplemented option `%s'" % opt
except SystemExit:
raise
except:
usage (sys.exc_info ()[1])
if 'a' in output:
output = 'fFns'
servers = readServers(servers)
if methodnums == None:
methodnums = range (1, len (DEFAULT_METHODS) + 1)
limitre = re.compile ('|'.join (args), re.IGNORECASE)
for s in servers:
if (not not limitre.match (s['name'])) == invert:
continue
serv = SOAP.SOAPProxy(s['endpoint'], namespace = s['namespace'])
for num in (methodnums):
if num > len(DEFAULT_METHODS):
break
total += 1
name = DEFAULT_METHODS[num - 1]
title = '%s: %s (#%d)' % (s['name'], name, num)
try:
fn = globals ()[name]
except KeyboardInterrupt:
raise
except:
if 'n' in output:
print title, "test not yet implemented"
notimp += 1
continue
try:
res = fn (serv, s['soapaction'], s['name'])
if s['nonfunctional'].has_key (name):
print title, "succeeded despite marked nonfunctional"
elif 's' in output:
print title, "succeeded "
succeed += 1
except KeyboardInterrupt:
print "fail"
raise
except:
if s['nonfunctional'].has_key (name):
if 'F' in output:
t = 'as expected'
if s['nonfunctional'][name] != '':
t += ', ' + s['nonfunctional'][name]
print title, "failed (%s) -" %t, sys.exc_info()[1]
failok += 1
else:
if 'f' in output:
print title, "failed -", str (sys.exc_info()[1])
fail += 1
if stats:
print " Tests ended at:", time.ctime (time.time())
if stats > 0:
print " Total tests: %d" % total
print " Successes: %d (%3.2f%%)" % \
(succeed, 100.0 * succeed / total)
if stats > 0 or fail > 0:
print "Failed unexpectedly: %d (%3.2f%%)" % \
(fail, 100.0 * fail / total)
if stats > 0:
print " Failed as expected: %d (%3.2f%%)" % \
(failok, 100.0 * failok / total)
if stats > 0 or notimp > 0:
print " Not implemented: %d (%3.2f%%)" % \
(notimp, 100.0 * notimp / total)
return fail + notimp
# Script entry point.  Note: main()'s return value (failure count) is
# discarded, so the process exit status does not reflect test failures.
if __name__ == "__main__":
    main()
| Python |
from SOAPpy import SOAP
import sys
import getopt
def usage():
print """usage: %s [options]
-m, --method=METHOD#[,METHOD#...] specify METHOD# of ? for the list
-p, --port=PORT# allows to specify PORT# of server
"""
sys.exit(1)
def methodUsage():
print "The available methods are:"
print "1. Monitor \t\t2. Clear"
sys.exit(0)
port = 12080
methodnum = 1
try:
opts, args = getopt.getopt (sys.argv[1:], 'p:m:', ['method','port'])
for opt, arg in opts:
if opt in ('-m','--method'):
if arg == '?':
methodUsage()
methodnum = int(arg)
elif opt in ('-p', '--port'):
port = int(arg)
else:
raise AttributeError, "Recognized but unimpl option '%s'" % opt
except SystemExit:
raise
except:
usage ()
ep = "http://208.177.157.221:%d/xmethodsInterop" % (port)
sa = "urn:soapinterop"
ns = "http://www.soapinterop.org/Bid"
serv = SOAP.SOAPProxy(ep, namespace =ns, soapaction = sa)
if methodnum == 1:
print serv.Monitor(str="actzero")
elif methodnum == 2:
print serv.Clear(str="actzero")
else:
print "invalid methodnum"
methodUsage()
| Python |
#!/usr/bin/env python
ident = '$Id: whoisTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
server = SOAPProxy("http://www.SoapClient.com/xml/SQLDataSoap.WSDL",
http_proxy=proxy)
print "whois>>", server.ProcessSRL(SRLFile="WHOIS.SRI",
RequestName="whois",
key = "microsoft.com")
| Python |
#!/usr/bin/env python
ident = '$Id: BabelfishWSDLTest.py,v 1.1 2003/07/18 15:58:28 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import WSDL
# Check for a web proxy definition in environment
try:
    proxy_url=os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
    proxy = "%s:%s" % (phost, pport)
except:
    # Best effort: unset or unparseable proxy means direct access.
    proxy = None
# Build a proxy from the remote WSDL and exercise several language pairs.
server = WSDL.Proxy('http://www.xmethods.net/sd/2001/BabelFishService.wsdl',
                    http_proxy=proxy)
english = "Hi Friend!"
print "Babelfish Translations"
print "------------------------"
print "English: '%s'" % english
print "French: '%s'" % server.BabelFish('en_fr',english)
print "Spanish: '%s'" % server.BabelFish('en_es',english)
print "Italian: '%s'" % server.BabelFish('en_it',english)
print "German: '%s'" % server.BabelFish('en_de',english)
print "Done."
| Python |
import sys
sys.path.insert(1, "..")
import SOAPpy
import time
# Outbound and return flight dates as SOAP dateTime nine-tuples.
dep = SOAPpy.dateTimeType((2004, 3, 24, 12, 30, 59, 4, 86, 0))
ret = SOAPpy.dateTimeType((2004, 3, 26, 12, 30, 59, 4, 86, 0))
in0 = SOAPpy.structType()
in0._addItem('outwardDate', dep)
in0._addItem('returnDate', ret)
in0._addItem('originAirport', 'den')
in0._addItem('destinationAirport', 'iad')
# Serialize the request envelope.
# NOTE(review): x is built but never sent, and the WSDL proxy below is
# created but never called -- this looks like an unfinished test.
x = SOAPpy.buildSOAP(
    in0,
    method="getAirFareQuote",
    namespace="urn:SBGAirFareQuotes.sbg.travel.ws.dsdata.co.uk"
)
wsdl = 'http://www.xmethods.net/sd/2001/TemperatureService.wsdl'
proxy = SOAPpy.WSDL.Proxy(wsdl)
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: xmethods.py,v 1.4 2003/12/18 06:31:50 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
    proxy_url=os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
    proxy = "%s:%s" % (phost, pport)
except:
    # Best effort: fall back to a direct connection.
    proxy = None
print "##########################################"
print " SOAP services registered at xmethods.net"
print "##########################################"
# List the name and id of every registered service.
server = SOAPProxy("http://www.xmethods.net/interfaces/query",
                   namespace = 'urn:xmethods-delayed-quotes',
                   http_proxy=proxy)
names = server.getAllServiceNames()
for item in names:
    print 'name:', item['name']
    print 'id :', item['id']
    print
| Python |
#!/usr/bin/env python
ident = '$Id: newsTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
    proxy_url=os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
    proxy = "%s:%s" % (phost, pport)
except:
    # Best effort: fall back to a direct connection.
    proxy = None
SoapEndpointURL = 'http://www22.brinkster.com/prasads/BreakingNewsService.asmx?WSDL'
MethodNamespaceURI = 'http://tempuri.org/'
# Three ways to do namespaces, force it at the server level
server = SOAPProxy(SoapEndpointURL, namespace = MethodNamespaceURI,
                   soapaction='http://tempuri.org/GetCNNNews', encoding = None,
                   http_proxy=proxy)
print "[server level CNN News call]"
print server.GetCNNNews()
# Do it inline ala SOAP::LITE, also specify the actually ns (namespace) and
# sa (soapaction)
server = SOAPProxy(SoapEndpointURL, encoding = None)
print "[inline CNNNews call]"
print server._ns('ns1',
                 MethodNamespaceURI)._sa('http://tempuri.org/GetCNNNews').GetCNNNews()
# Create an instance of your server with specific namespace and then use
# inline soapactions for each call
dq = server._ns(MethodNamespaceURI)
print "[namespaced CNNNews call]"
print dq._sa('http://tempuri.org/GetCNNNews').GetCNNNews()
print "[namespaced CBSNews call]"
print dq._sa('http://tempuri.org/GetCBSNews').GetCBSNews()
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: translateTest.py,v 1.5 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
    proxy_url=os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
    proxy = "%s:%s" % (phost, pport)
except:
    # Best effort: fall back to a direct connection.
    proxy = None
# The XMethods SOAP::Lite gateway; the namespace selects the BabelFish
# translation method.
server = SOAPProxy("http://services.xmethods.com:80/perl/soaplite.cgi",
                   http_proxy=proxy)
babel = server._ns('urn:xmethodsBabelFish#BabelFish')
print babel.BabelFish(translationmode = "en_fr",
                      sourcedata = "The quick brown fox did something or other")
| Python |
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Serialize a one-element and an empty typed integer array; the empty
# case exercises typedArrayType's handling of a typed but element-less
# array.
one = typedArrayType(data=[1],typed=type(1))
tmp = typedArrayType(data=[], typed=type(1))
print buildSOAP( one )
print buildSOAP( tmp )
| Python |
#!/usr/bin/python2
#standard imports
import syslog, sys
#domain specific imports
sys.path.insert (1, '..')
import SOAPpy
# Ask SOAPpy to hand results back as plain Python objects.
SOAPpy.Config.simplify_objects=1
# Server-side signatures these calls exercise (kept for reference):
## def test_integer(self,pass_integer):
## def test_string(self,pass_string):
## def test_float(self,pass_float):
## def test_tuple(self,pass_tuple):
## def test_list(self,pass_list):
## def test_dictionary(self,pass_dictionary):
if __name__ == "__main__":
    # Round-trip one value of each basic type through the echo server on
    # localhost:9999 and verify it survives unchanged.  Tuples come back
    # as lists, hence the tuple() coercion below.
    server = SOAPpy.SOAPProxy("http://localhost:9999")
    original_integer = 5
    result_integer = server.test_integer(original_integer)
    print "original_integer %s" % original_integer
    print "result_integer %s" % result_integer
    assert(result_integer==original_integer)
    print
    original_string = "five"
    result_string = server.test_string(original_string)
    print "original_string %s" % original_string
    print "result_string %s" % result_string
    assert(result_string==original_string)
    print
    original_float = 5.0
    result_float = server.test_float(original_float)
    print "original_float %s" % original_float
    print "result_float %s" % result_float
    assert(result_float==original_float)
    print
    original_tuple = (1,2,"three","four",5)
    result_tuple = server.test_tuple(original_tuple)
    print "original_tuple %s" % str(original_tuple)
    print "result_tuple %s" % str(result_tuple)
    assert(tuple(result_tuple)==original_tuple)
    print
    original_list = [5,4,"three",2,1]
    result_list = server.test_list(original_list)
    print "original_list %s" % original_list
    print "result_list %s" % result_list
    assert(result_list==original_list)
    print
    original_dictionary = {
        'one': 1,
        "two": 2,
        "three": 3,
        "four": 4,
        "five": 5,
        }
    result_dictionary = server.test_dictionary(original_dictionary)
    print "original_dictionary %s" % original_dictionary
    print "result_dictionary %s" % result_dictionary
    assert(result_dictionary==original_dictionary)
    print
    # Ask the server to shut itself down.
    server.quit()
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
import string
import sys
sys.path.insert (1, '..')
from SOAPpy import *
ident = '$Id: cardServer.py,v 1.4 2004/02/18 21:22:13 warnes Exp $'
# create the list of all cards, and keep strings for each suit
__cs = "Clubs"
__ds = "Diamonds"
__hs = "Hearts"
__ss = "Spades"
__cards = []
for suit in [__cs, __ds, __hs, __ss]:
    # Number cards "2".."10": num runs 1..9, so num+1 is 2..10 ...
    for num in range(9):
        num += 1
        __cards.append(str(num+1)+" of "+suit)
    # ... plus the face cards.  NOTE(review): "ace" is lower-case while
    # the other faces are capitalized; arrangeHand() depends on this
    # exact spelling, so do not change one without the other.
    for face in ["ace","King","Queen","Jack"]:
        __cards.append(face+" of "+suit)
def deal(num):
    """Deal `num` distinct random cards; returns -1 if num is not 1..52."""
    if num not in range(1,53):
        return -1
    else:
        alreadydealt = []
        ignore = 0
        handdealt = []
        import whrandom
        # Rejection sampling: take two decimal digits of a random float
        # as a card index and keep it only if it names an undealt card
        # (0..51).  Out-of-range or repeated indices are simply retried.
        while num > 0:
            idx = int(str(whrandom.random())[2:4])
            if idx in range(52) and idx not in alreadydealt:
                handdealt.append(__cards[idx])
                alreadydealt.append(idx)
                num -= 1
            else:
                # `ignore` only counts rejected draws; it is never used.
                ignore += 1
                continue
        return handdealt
def arrangeHand(hand):
    """Group a dealt hand by suit and order each suit naturally.

    Cards within a suit are sorted alphabetically, then "10 of ..." and
    "King of ..." are re-inserted so the order reads 2..10, Jack, Queen,
    King, ace (the deck spells "ace" lower-case, which sorts last).
    Returns clubs, then diamonds, hearts and spades, concatenated.

    Improvements over the original: the loop variable no longer shadows
    the builtin `str`, and substring membership (`in`) replaces the
    redundant local `import string` + string.find(...) != -1 calls.
    """
    clubs = []
    diamonds = []
    hearts = []
    spades = []
    # Partition the hand by suit name appearing in the card string.
    for card in hand:
        if __cs in card:
            clubs.append(card)
        elif __ds in card:
            diamonds.append(card)
        elif __hs in card:
            hearts.append(card)
        elif __ss in card:
            spades.append(card)
    for cards, suit in ((clubs, __cs), (diamonds, __ds),
                        (hearts, __hs), (spades, __ss)):
        cards.sort()
        # "10 of X" sorts before "2 of X" alphabetically; move it to sit
        # just before whichever face cards are present.
        offset = 0
        if "10 of " + suit in cards:
            cards.remove("10 of " + suit)
            if "Jack of " + suit in cards: offset += 1
            if "Queen of " + suit in cards: offset += 1
            if "King of " + suit in cards: offset += 1
            if "ace of " + suit in cards: offset += 1
            cards.insert(len(cards) - offset, "10 of " + suit)
        # "King of X" sorts before "Queen of X"; move it after Queen and
        # before the ace when an ace is present.
        if "King of " + suit in cards:
            cards.remove("King of " + suit)
            if "ace of " + suit in cards:
                cards.insert(len(cards) - 1, "King of " + suit)
            else:
                cards.append("King of " + suit)
    return clubs + diamonds + hearts + spades
def dealHand (NumberOfCards, StringSeparator):
    """Deal NumberOfCards random cards, joined by StringSeparator."""
    return string.join (deal (NumberOfCards), StringSeparator)
def dealArrangedHand (NumberOfCards, StringSeparator):
if NumberOfCards < 1 or NumberOfCards > 52:
raise ValueError, "NumberOfCards must be between 1 and 52"
unarranged = deal(NumberOfCards)
hand = arrangeHand(unarranged)
return string.join(hand, StringSeparator)
def dealCard ():
    """Deal one random card and return it."""
    hand = deal (1)
    return hand[0]
# Serve-loop flag polled below; the SOAP-exposed quit() clears it.
run = 1
def quit():
    """Remote shutdown hook: clear the serve-loop flag."""
    global run
    run = 0
# Stand up the SOAP server on localhost and expose the dealing API plus
# a remote quit() so the serve loop below can be stopped by a client.
namespace = 'http://soapinterop.org/'
server = SOAPServer (("localhost", 12027))
server.registerKWFunction (dealHand, namespace)
server.registerKWFunction (dealArrangedHand, namespace)
server.registerKWFunction (dealCard, namespace)
server.registerKWFunction (quit, namespace)
try:
    while run:
        server.handle_request()
except KeyboardInterrupt:
    pass
| Python |
import sys
sys.path.insert(1, "..")
import SOAPpy
# Fetch the temperature for a fixed ZIP code via the XMethods WSDL.
url = 'http://www.xmethods.org/sd/2001/TemperatureService.wsdl'
# NOTE(review): `zip` shadows the builtin zip() for the rest of the script.
zip = '06340'
proxy = SOAPpy.WSDL.Proxy(url)
temp = proxy.getTemp(zip)
print 'Temperature at', zip, 'is', temp
| Python |
"""
Check handing of unicode.
"""
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1
#Config.dumpHeadersIn = 1
#Config.dumpSOAPIn = 1
#Config.dumpSOAPOut = 1
# ask for returned SOAP responses to be converted to basic python types
Config.simplify_objects = 0
#Config.BuildWithNoType = 1
#Config.BuildWithNoNamespacePrefix = 1
server = SOAPProxy("http://localhost:9900/")
x = u'uMOO' # Single unicode string
y = server.echo_simple((x,))
assert( x==y[0] )
x = [u'uMoo1',u'uMoo2'] # array of unicode strings
y = server.echo_simple(x)
assert( x[0] == y[0] )
assert( x[1] == y[1] )
x = {
u'A':1,
u'B':u'B',
'C':u'C',
'D':'D'
}
y = server.echo_simple(x)
for key in x.keys():
assert( x[key] == y[0][key] )
print "Success"
| Python |
from SOAPpy import WSDL
# Drive the Google SOAP search API from a local copy of its WSDL.
server = WSDL.Proxy('/home/warneg/src/google/googleapi/GoogleSearch.wsdl')
# NOTE(review): hard-coded developer key; rotate/remove before sharing.
key = "6k0oDPZQFHL0zpjy6ZO6ufUVFKBgvqTo"
results = server.doGoogleSearch(key, 'warnes', 0, 10, False, "",
                                False, "", "utf-8", "utf-8")
# Print rank, title and URL for each hit.
for i in range(len(results.resultElements)):
    res = results.resultElements[i]
    print '%d: %s --> %s' % ( i, res.title, res.URL )
| Python |
#!/usr/bin/env python
import unittest
import os, re
import sys
sys.path.insert (1, '..')
import SOAPpy
ident = '$Id: testWSDL.py,v 1.2 2003/05/09 12:46:11 warnes Exp $'
# Check for a web proxy definition in environment
try:
    proxy_url=os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
    http_proxy = "%s:%s" % (phost, pport)
except:
    # Best effort: run without a proxy when unset or unparseable.
    http_proxy = None
class IntegerArithmenticTestCase(unittest.TestCase):
def setUp(self):
self.wsdlstr1 = '''<?xml version="1.0"?>
<definitions name="TemperatureService" targetNamespace="http://www.xmethods.net/sd/TemperatureService.wsdl" xmlns:tns="http://www.xmethods.net/sd/TemperatureService.wsdl" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" xmlns="http://schemas.xmlsoap.org/wsdl/">
<message name="getTempRequest">
<part name="zipcode" type="xsd:string"/>
</message>
<message name="getTempResponse">
<part name="return" type="xsd:float"/>
</message>
<portType name="TemperaturePortType">
<operation name="getTemp">
<input message="tns:getTempRequest"/>
<output message="tns:getTempResponse"/>
</operation>
</portType>
<binding name="TemperatureBinding" type="tns:TemperaturePortType">
<soap:binding style="rpc" transport="http://schemas.xmlsoap.org/soap/http"/>
<operation name="getTemp">
<soap:operation soapAction=""/>
<input>
<soap:body use="encoded" namespace="urn:xmethods-Temperature" encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"/>
</input>
<output>
<soap:body use="encoded" namespace="urn:xmethods-Temperature" encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"/>
</output>
</operation>
</binding>
<service name="TemperatureService">
<documentation>Returns current temperature in a given U.S. zipcode </documentation>
<port name="TemperaturePort" binding="tns:TemperatureBinding">
<soap:address location="http://services.xmethods.net:80/soap/servlet/rpcrouter"/>
</port>
</service>
</definitions>
'''
def testParseWsdlString(self):
'''Parse XMethods TemperatureService wsdl from a string.'''
wsdl = SOAPpy.WSDL.Proxy(self.wsdlstr1, http_proxy=http_proxy)
self.assertEquals(len(wsdl.methods), 1)
method = wsdl.methods.values()[0]
self.assertEquals(method.methodName, 'getTemp')
self.assertEquals(method.namespace, 'urn:xmethods-Temperature')
self.assertEquals(method.location,
'http://services.xmethods.net:80/soap/servlet/rpcrouter')
def testParseWsdlFile(self):
'''Parse XMethods TemperatureService wsdl from a file.'''
# figure out path to the test directory
dir = os.path.abspath('.')
fname = './TemperatureService.wsdl'
try:
f = file(fname)
except (IOError, OSError):
self.assert_(0, 'Cound not find wsdl file "%s"' % file)
wsdl = SOAPpy.WSDL.Proxy(fname, http_proxy=http_proxy)
self.assertEquals(len(wsdl.methods), 1)
method = wsdl.methods.values()[0]
self.assertEquals(method.methodName, 'getTemp')
self.assertEquals(method.namespace, 'urn:xmethods-Temperature')
self.assertEquals(method.location,
'http://services.xmethods.net:80/soap/servlet/rpcrouter')
def testParseWsdlUrl(self):
'''Parse XMethods TemperatureService wsdl from a url.'''
wsdl = SOAPpy.WSDL.Proxy('http://www.xmethods.net/sd/2001/TemperatureService.wsdl', http_proxy=http_proxy)
self.assertEquals(len(wsdl.methods), 1)
method = wsdl.methods.values()[0]
self.assertEquals(method.methodName, 'getTemp')
self.assertEquals(method.namespace, 'urn:xmethods-Temperature')
self.assertEquals(method.location,
'http://services.xmethods.net:80/soap/servlet/rpcrouter')
def testGetTemp(self):
'''Parse TemperatureService and call getTemp.'''
zip = '01072'
proxy = SOAPpy.WSDL.Proxy(self.wsdlstr1, http_proxy=http_proxy)
temp = proxy.getTemp(zip)
print 'Temperature at', zip, 'is', temp
# Run the WSDL test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1
#Config.dumpHeadersIn = 1
#Config.dumpSOAPIn = 1
#Config.dumpSOAPOut = 1
# ask for returned SOAP responses to be converted to basic python types
Config.simplify_objects = 1
#Config.BuildWithNoType = 1
#Config.BuildWithNoNamespacePrefix = 1
# Pick the transport from the command line: -s for HTTPS, -g for Globus,
# plain HTTP otherwise.  Each transport gets a root proxy plus one bound
# to the /pathtest path.
if len(sys.argv) > 1 and sys.argv[1] == '-s':
    # Use secure http
    pathserver = SOAPProxy("https://localhost:9900/pathtest")
    server = SOAPProxy("https://localhost:9900")
elif len(sys.argv) > 1 and sys.argv[1] == '-g':
    # use Globus for communication
    import pyGlobus
    pathserver = SOAPProxy("httpg://localhost:9900/pathtest")
    server = SOAPProxy("httpg://localhost:9900")
else:
    # Default: use standard http
    pathserver = SOAPProxy("http://localhost:9900/pathtest")
    server = SOAPProxy("http://localhost:9900")
# Each call below is tried on both proxies; failures are printed rather
# than fatal so every variant gets exercised.
# Echo...
try:
    print server.echo("MOO")
except Exception, e:
    print "Caught exception: ", e
try:
    print pathserver.echo("MOO")
except Exception, e:
    print "Caught exception: ", e
# ...in an object
try:
    print server.echo_ino("moo")
except Exception, e:
    print "Caught exception: ", e
try:
    print pathserver.echo_ino("cow")
except Exception, e:
    print "Caught exception: ", e
# ...in an object in an object
try:
    print server.prop.echo2("moo")
except Exception, e:
    print "Caught exception: ", e
try:
    print pathserver.prop.echo2("cow")
except Exception, e:
    print "Caught exception: ", e
# ...with keyword arguments
try:
    print server.echo_wkw(third = "three", first = "one", second = "two")
except Exception, e:
    print "Caught exception: ", e
try:
    print pathserver.echo_wkw(third = "three", first = "one", second = "two")
except Exception, e:
    print "Caught exception: ", e
# ...with a context object
try:
    print server.echo_wc("moo")
except Exception, e:
    print "Caught exception: ", e
try:
    print pathserver.echo_wc("cow")
except Exception, e:
    print "Caught exception: ", e
# ...with a header
hd = headerType(data = {"mystring": "Hello World"})
try:
    print server._hd(hd).echo_wc("moo")
except Exception, e:
    print "Caught exception: ", e
try:
    print pathserver._hd(hd).echo_wc("cow")
except Exception, e:
    print "Caught exception: ", e
# close down server
server.quit()
| Python |
#!/usr/bin/env python
ident = '$Id: speedTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import time
import sys
sys.path.insert(1, "..")
# Small fixture: a plain two-string getRate RPC envelope.
x='''<SOAP-ENV:Envelope
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/1999/XMLSchema">
<SOAP-ENV:Body>
<ns1:getRate xmlns:ns1="urn:demo1:exchange" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<country1 xsi:type="xsd:string">USA</country1>
<country2 xsi:type="xsd:string">japan</country2>
</ns1:getRate>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
# Larger fixture: exercises multi-reference (href/id) deserialization.
x2='''<SOAP-ENV:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0 http://schemas.xmlsoap.org/soap/encoding/" xmlns:i3="http://soapinterop.org/xsd" xmlns:i2="http://soapinterop.org/">
<SOAP-ENV:Body>
<i2:echoStructArray id="ref-1">
<return href="#ref-4"/>
</i2:echoStructArray>
<SOAP-ENC:Array id="ref-4" SOAP-ENC:arrayType="i3:SOAPStruct[3]">
<item href="#ref-5"/>
<item href="#ref-6"/>
<item href="#ref-7"/>
</SOAP-ENC:Array>
<i3:SOAPStruct id="ref-5">
<varString xsi:type="xsd:string">West Virginia</varString>
<varInt xsi:type="xsd:int">-546</varInt>
<varFloat xsi:type="xsd:float">-5.398</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-6">
<varString xsi:type="xsd:string">New Mexico</varString>
<varInt xsi:type="xsd:int">-641</varInt>
<varFloat xsi:type="xsd:float">-9.351</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-7">
<varString xsi:type="xsd:string">Missouri</varString>
<varInt xsi:type="xsd:int">-819</varInt>
<varFloat xsi:type="xsd:float">1.495</varFloat>
</i3:SOAPStruct>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
# Import in function, because for some reason they slow each other
# down in same namespace ???
def SOAPParse(inxml):
    """Time SOAPpy's parse (including marshalling) of inxml; returns seconds."""
    from SOAPpy import parseSOAPRPC
    start = time.time()
    parseSOAPRPC(inxml)
    return time.time() - start
def SAXParse(inxml):
    """Time a bare SAX parse of inxml (no marshalling); returns seconds."""
    import xml.sax
    sink = xml.sax.handler.ContentHandler()
    start = time.time()
    xml.sax.parseString(inxml, sink)
    return time.time() - start
def DOMParse(inxml):
    """Time a minidom parse of inxml (no marshalling); returns seconds."""
    import xml.dom.minidom
    start = time.time()
    xml.dom.minidom.parseString(inxml)
    return time.time() - start
# Weird, but the SAX parser runs really slow the first time.
# Probably got to load a c module or something.
# Warm it up once so the timings below are comparable.
SAXParse(x)
print
print "Simple XML"
print "SAX Parse, no marshalling ", SAXParse(x)
print "SOAP Parse, and marshalling ", SOAPParse(x)
print "DOM Parse, no marshalling ", DOMParse(x)
print
print "Complex XML (references)"
print "SAX Parse, no marshalling ", SAXParse(x2)
print "SOAP Parse, and marshalling ", SOAPParse(x2)
print "DOM Parse, no marshalling ", DOMParse(x2)
| Python |
#!/usr/bin/python
import sys
sys.path.insert(1, "..")
import SOAPpy
import time
import gc
import types
# Keep every unreachable object in gc.garbage so leaks are observable.
gc.set_debug(gc.DEBUG_SAVEALL)
# Feed the parser garbage many times; each parse fails and the bare
# except deliberately swallows the error -- we only care whether the
# failed parses leave uncollectable cycles behind.
for i in range(400):
    try:
        t = SOAPpy.SOAP.parseSOAPRPC('bad soap payload')
    except: pass
gc.collect()
if len(gc.garbage):
    print 'still leaking'
else:
    print 'no leak'
| Python |
#!/usr/bin/python2
#standard imports
import syslog, sys
#domain specific imports
sys.path.insert (1, '..')
import SOAPpy
class test_service:
    """Echo service exercising SOAPpy's simplify_objects marshalling.

    Each test_* method prints the Python type it received and echoes the
    value back unchanged; quit() stops the serve loop below.
    """
    # Serve-loop flag; quit() shadows it with an instance attribute.
    run = 1
    def test_integer(self,pass_integer):
        print type(pass_integer)
        return pass_integer
    def test_string(self,pass_string):
        print type(pass_string)
        return pass_string
    def test_float(self,pass_float):
        print type(pass_float)
        return pass_float
    def test_tuple(self,pass_tuple):
        print type(pass_tuple), pass_tuple
        return pass_tuple
    def test_list(self,pass_list):
        print type(pass_list), pass_list
        return pass_list
    def test_dictionary(self,pass_dictionary):
        print type(pass_dictionary), pass_dictionary
        return pass_dictionary
    def quit(self):
        self.run = 0
# Serve the echo object on localhost:9999 until a client calls quit().
server = SOAPpy.SOAPServer(("localhost",9999))
# Return plain Python values to methods instead of SOAPpy wrapper objects.
SOAPpy.Config.simplify_objects=1
access_object = test_service()
server.registerObject(access_object)
# Handle one request at a time; quit() flips access_object.run to 0.
while access_object.run:
    server.handle_request()
| Python |
import gc
import socket
import threading
import time
import unittest
import sys
sys.path.insert(1, "..")
import SOAPpy
#SOAPpy.Config.debug=1
# Global flag polled by server1()'s serving loop; kill() sets it to 1
# to make the server shut down.
quit = 0
def echoDateTime(dt):
    """Return the received DateTime value unchanged (round-trip helper)."""
    return dt
def echo(s):
    """Return the input string concatenated with itself."""
    return s * 2
def kill():
    """tell the server to quit"""
    # Flip the module-level flag that server1()'s serving loop polls.
    global quit
    quit = 1
def server1():
    """start a SOAP server on localhost:8000"""
    print "Starting SOAP Server...",
    server = SOAPpy.Server.SOAPServer(addr=('127.0.0.1', 8000))
    server.registerFunction(echoDateTime)
    server.registerFunction(echo)
    server.registerFunction(kill)
    print "Done."
    global quit
    # Serve one request at a time until a client calls kill().
    while not quit:
        server.handle_request()
    # Reset the flag so the server can be started again in this process.
    quit = 0
    print "Server shut down."
class ClientTestCase(unittest.TestCase):
server = None
startup_timeout = 5 # seconds
def setUp(self):
'''This is run once before each unit test.'''
serverthread = threading.Thread(target=server1, name="SOAPServer")
serverthread.start()
start = time.time()
connected = False
server = None
while not connected and time.time() - start < self.startup_timeout:
print "Trying to connect to the SOAP server...",
try:
server = SOAPpy.Client.SOAPProxy('127.0.0.1:8000')
server.echo('Hello World')
except socket.error, e:
print "Failure:", e
time.sleep(0.5)
else:
connected = True
self.server = server
print "Success."
if not connected: raise 'Server failed to start.'
def tearDown(self):
'''This is run once after each unit test.'''
print "Trying to shut down SOAP server..."
if self.server is not None:
self.server.kill()
time.sleep(5)
return 1
def testEcho(self):
'''Test echo function.'''
server = SOAPpy.Client.SOAPProxy('127.0.0.1:8000')
s = 'Hello World'
self.assertEquals(server.echo(s), s+s)
def testNamedEcho(self):
'''Test echo function.'''
server = SOAPpy.Client.SOAPProxy('127.0.0.1:8000')
s = 'Hello World'
self.assertEquals(server.echo(s=s), s+s)
def testEchoDateTime(self):
'''Test passing DateTime objects.'''
server = SOAPpy.Client.SOAPProxy('127.0.0.1:8000')
dt = SOAPpy.Types.dateTimeType(data=time.time())
dt_return = server.echoDateTime(dt)
self.assertEquals(dt_return, dt)
# def testNoLeak(self):
# '''Test for memory leak.'''
# gc.set_debug(gc.DEBUG_SAVEALL)
# for i in range(400):
# server = SOAPpy.Client.SOAPProxy('127.0.0.1:8000')
# s = 'Hello World'
# server.echo(s)
# gc.collect()
# self.assertEquals(len(gc.garbage), 0)
# Run the client test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
import sys
sys.path.insert (1, '..')
from SOAPpy import *
ident = '$Id: cardClient.py,v 1.4 2004/02/18 21:22:13 warnes Exp $'
# Interop endpoint/namespace/action for the card-dealing demo service.
endpoint = "http://localhost:12027/xmethodsInterop"
sa = "urn:soapinterop"
ns = "http://soapinterop.org/"
serv = SOAPProxy(endpoint, namespace=ns, soapaction=sa)
# Each call is best-effort: if the server lacks the method (or the call
# fails for any reason), report it and fall back to 0.
try: hand = serv.dealHand(NumberOfCards = 13, StringSeparator = '\n')
except: print "no dealHand"; hand = 0
try: sortedhand = serv.dealArrangedHand(NumberOfCards=13,StringSeparator='\n')
except: print "no sorted"; sortedhand = 0
try: card = serv.dealCard()
except: print "no card"; card = 0
print "*****hand****\n",hand,"\n*********"
print "******sortedhand*****\n",sortedhand,"\n*********"
print "card:",card
serv.quit()
| Python |
#!/usr/bin/env python
import sys, unittest
sys.path.insert(1, "..")
from SOAPpy import *
Config.debug=1
class ClientTestCase(unittest.TestCase):
    """Unit tests for parseSOAPRPC() parse rules and buildSOAP()."""
    def testParseRules(self):
        # Typed parsing driven by a "parse rules" dict: each leaf names a
        # converter ('string', 'int', ...) or supplies a callable.
        x = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body
soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SomeMethod>
<Result>
<Book>
<title>My Life and Work</title>
</Book>
<Person>
<name>Henry Ford</name>
<age> 49 </age>
<height> 5.5 </height>
</Person>
</Result>
</SomeMethod>
</soap:Body>
</soap:Envelope>
"""
        def negfloat(x):
            # Custom converter used in the rules below: negate the float.
            return float(x) * -1.0
        # parse rules
        pr = {'SomeMethod':
              {'Result':
               {
                'Book': {'title':'string'},
                'Person': {'age':'int',
                           'height':negfloat}
                }
               }
              }
        y = parseSOAPRPC(x, rules=pr)
        assert y.Result.Person.age == 49
        assert y.Result.Person.height == -5.5
        # Array parsing: the same untyped payload read as string[] or int[4].
        x = '''<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<item>12</item>
<item>23</item>
<item>0</item>
<item>-31</item>
</param>
<param1 xsi:null="1"></param1>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
        # parse rules
        pr = {'Bounds':
              {'param': 'arrayType=string[]',
               }
              }
        pr2 = {'Bounds':
               {'param': 'arrayType=int[4]',
                }
               }
        y = parseSOAPRPC(x, rules=pr)
        assert y.param[1]=='23'
        y = parseSOAPRPC(x, rules=pr2)
        assert y.param[1]==23
        # With arrayType=ur-type[], each item keeps its own xsi:type.
        x = '''<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<item xsi:type="xsd:int">12</item>
<item xsi:type="xsd:string">23</item>
<item xsi:type="xsd:float">0</item>
<item xsi:type="xsd:int">-31</item>
</param>
<param1 xsi:null="1"></param1>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
        pr = {'Bounds':
              {'param': 'arrayType=ur-type[]'
               }
              }
        y = parseSOAPRPC(x, rules=pr)
        assert y.param[0]==12
        assert y.param[1]=='23'
        assert y.param[2]==float(0)
        assert y.param[3]==-31
    # Try the reverse, not implemented yet.
    def testBuildObject(self):
        # Build a nested structType graph and serialize it with buildSOAP().
        class Book(structType):
            def __init__(self):
                self.title = "Title of a book"
        class Person(structType):
            def __init__(self):
                self.age = "49"
                self.height = "5.5"
        class Library(structType):
            def __init__(self):
                self._name = "Result"
                self.Book = Book()
                self.Person = Person()
        obj = Library()
        x = buildSOAP( kw={'Library':obj} )
        print(x)
# Run the parse-rule test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: quoteTest.py,v 1.5 2003/12/18 06:31:50 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in the environment.
# Fix: only catch the two expected failure modes instead of a bare
# except (which would also swallow KeyboardInterrupt/SystemExit):
# KeyError when http_proxy is unset, AttributeError when it does not
# match "http://host:port" (re.search returns None).
try:
    proxy_url = os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1, 2)
    proxy = "%s:%s" % (phost, pport)
except (KeyError, AttributeError):
    proxy = None
# Three ways to specify namespaces.  First: force it at the proxy level.
server = SOAPProxy("http://services.xmethods.com:9090/soap",
                   namespace = 'urn:xmethods-delayed-quotes',
                   http_proxy=proxy)
print "IBM>>", server.getQuote(symbol = 'IBM')
# Second: inline per call, a la SOAP::Lite, specifying prefix and URI.
server = SOAPProxy("http://services.xmethods.com:9090/soap",
                   http_proxy=proxy)
print "IBM>>", server._ns('ns1',
                          'urn:xmethods-delayed-quotes').getQuote(symbol = 'IBM')
# Third: create a namespaced clone of the proxy and reuse it.
dq = server._ns('urn:xmethods-delayed-quotes')
print "IBM>>", dq.getQuote(symbol='IBM')
print "ORCL>>", dq.getQuote(symbol='ORCL')
print "INTC>>", dq.getQuote(symbol='INTC')
| Python |
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# A .NET-style fault response whose <detail> element refers (via href)
# to a multiRef struct carried alongside the fault in the Body.
detailed_fault = \
"""
<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0 http://schemas.xmlsoap.org/soap/encoding/" xmlns:a1="http://schemas.microsoft.com/clr/ns/System.Runtime.Serialization.Formatters">
<SOAP-ENV:Body>
<SOAP-ENV:Fault id="ref-1">
<faultcode>soapenv:Server.generalException</faultcode>
<faultstring>Exception thrown on Server</faultstring>
<detail>
<loginFailureFault href="#id0"/>
<exceptionName xsi:type="xsd:string">...</exceptionName>
</detail>
</SOAP-ENV:Fault>
<multiRef id="id0">
<description xsi:type="xsd:string">Login failure (504):Unknown User</description>
<module xsi:type="xsd:string"> ... </module>
<timestamp xsi:type="xsd:string">...</timestamp>
<faultcode xsi:type="xsd:string"> ...</faultcode>
<parameter xsi:type="xsd:string"> ... </parameter>
</multiRef>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"""
# strip() removes the leading newline so the XML declaration comes first.
z = parseSOAPRPC(detailed_fault.strip() )
# The parser must surface the fault and resolve the href into .detail.
assert(z.__class__==faultType)
assert(z.faultstring=="Exception thrown on Server")
assert(z.detail.loginFailureFault.description=='Login failure (504):Unknown User')
print "Success"
| Python |
#!/usr/bin/env python
ident = '$Id: weatherTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in the environment.
# Fix: only catch the two expected failure modes instead of a bare
# except (which would also swallow KeyboardInterrupt/SystemExit):
# KeyError when http_proxy is unset, AttributeError when it does not
# match "http://host:port" (re.search returns None).
try:
    proxy_url = os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1, 2)
    proxy = "%s:%s" % (phost, pport)
except (KeyError, AttributeError):
    proxy = None
# Temperature-by-zipcode demo service on xmethods.
SoapEndpointURL = 'http://services.xmethods.net:80/soap/servlet/rpcrouter'
MethodNamespaceURI = 'urn:xmethods-Temperature'
# Specify the namespace inline per call, a la SOAP::Lite.
server = SOAPProxy(SoapEndpointURL, http_proxy=proxy)
print "inline", server._ns('ns1', MethodNamespaceURI).getTemp(zipcode='94063')
| Python |
#!/usr/bin/env python
ident = '$Id: storageTest.py,v 1.6 2005/02/16 04:24:54 warnes Exp $'
import sys, os, time, signal, re
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy, SOAPConfig, SOAPUserAgent
# Check for a web proxy definition in the environment.
# Fix: only catch the two expected failure modes instead of a bare
# except (which would also swallow KeyboardInterrupt/SystemExit):
# KeyError when http_proxy is unset, AttributeError when it does not
# match "http://host:port" (re.search returns None).
try:
    proxy_url = os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1, 2)
    http_proxy = "%s:%s" % (phost, pport)
except (KeyError, AttributeError):
    http_proxy = None
# Test account / connection parameters for the xmlStorageSystem service.
PROXY="http://www.soapware.org/xmlStorageSystem"   # remote service endpoint
EMAIL="SOAPpy@actzero.com"
NAME="test_user"
PASSWORD="mypasswd"
SERIAL=1123214
MY_PORT=15600   # local port our notification callback server listens on
def resourceChanged (url):
print "\n##### NOTIFICATION MESSAGE: Resource %s has changed #####\n" % url
return booleanType(1)
def printstatus (cmd, stat):
print
if stat.flError:
print "### %s failed: %s ###" % (cmd, stat.message)
else:
print "### %s successful: %s ###" % (cmd, stat.message)
return not stat.flError
server = SOAPProxy(encoding="US-ASCII",
proxy=PROXY,
soapaction="/xmlStorageSystem",
http_proxy=http_proxy,
# config=SOAPConfig(debug=1)
)
# Register as a new user or update user information
reg = server.registerUser(email=EMAIL, name=NAME, password=PASSWORD,
clientPort=MY_PORT, userAgent=SOAPUserAgent(),
serialnumber=SERIAL)
printstatus("registerUser", reg)
# See what this server can do
reg = server.getServerCapabilities (email=EMAIL, password=PASSWORD)
if printstatus("getServerCapabilities", reg):
print "Legal file extensions: " + str(reg.legalFileExtensions)
print "Maximum file size: " + str(reg.maxFileSize)
print "Maximum bytes per user: " + str(reg.maxBytesPerUser)
print "Number of bytes in use by the indicated user: " + str(reg.ctBytesInUse)
print "URL of the folder containing your files: " + str(reg.yourUpstreamFolderUrl)
# Store some files
reg = server.saveMultipleFiles (email=EMAIL, password=PASSWORD,
relativepathList=['index.html','again.html'],
fileTextList=['<html><title>bennett@actzero.com home page</title><body>' +
'<a href=again.html>Hello Earth</a></body></html>',
'<html><title>bennett@actzero.com home page</title><body>' +
'<a href=index.html>Hello Earth Again</a></body></html>'])
if printstatus("saveMultipleFiles", reg):
print "Files stored:"
for file in reg.urlList:
print " %s" % file
# Save this for call to test pleaseNotify
mylist = reg.urlList
else:
mylist = []
# Check to see what files are stored
reg = server.getMyDirectory (email=EMAIL, password=PASSWORD)
if printstatus("getMyDirectory", reg):
i = 1
while hasattr(reg.directory, "file%05d" % i):
d = getattr(reg.directory, "file%05d" % i)
print "Relative Path: %s" % d.relativePath
print "Size: %d" % d.size
print "Created: %s" % d.whenCreated
print "Last Uploaded: %s" % d.whenLastUploaded
print "URL: %s" % d.url
print
i += 1
# Set up notification
reg = server.pleaseNotify(notifyProcedure="resourceChanged", port=MY_PORT, path="/", protocol="soap", urlList=mylist)
printstatus("notifyProcedure", reg)
pid = os.fork()
if pid == 0:
# I am a child process. Set up SOAP server to receive notification
print
print "## Starting notification server ##"
s = SOAPServer(('localhost', MY_PORT))
s.registerFunction(resourceChanged)
s.serve_forever()
else:
def handler(signum, frame):
# Kill child process
print "Killing child process %d" % pid
os.kill(pid, signal.SIGINT)
signal.signal(signal.SIGINT, handler)
# I am a parent process
# Change some files
time.sleep(3)
reg = server.saveMultipleFiles (email=EMAIL, password=PASSWORD,
relativepathList=['index.html'],
fileTextList=['<html><title>bennett@actzero.com home page</title><body>' +
'<a href=again.html>Hello Bennett</a></body></html>'])
if printstatus("saveMultipleFiles", reg):
print "Files stored:"
for file in reg.urlList:
print " %s" % file
os.waitpid(pid, 0)
| Python |
"""
Check handing of unicode.
"""
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1
#Config.dumpHeadersIn = 1
#Config.dumpSOAPIn = 1
#Config.dumpSOAPOut = 1
# Ask for returned SOAP responses to be converted to basic Python types
# (plain dicts/lists/strings) instead of SOAPpy wrapper objects.
Config.simplify_objects = 1
#Config.BuildWithNoType = 1
#Config.BuildWithNoNamespacePrefix = 1
def headers():
    """Build the SOAP header sent with every request: a single
    'useragent' string field set to "foo"."""
    hdr = Types.headerType()
    hdr.useragent = Types.stringType("foo")
    return hdr
server = SOAPProxy("http://localhost:9900/",header=headers())
# Two keyword records that mix ints, None, empty strings and booleans,
# mirroring an AdWords-style payload.
adgroupid = 197497504
keyword1 = { 'status': 'Moderate',
             'adGroupId': 197497504,
             'destinationURL': None,
             'language': '',
             'text': 'does not work',
             'negative': bool(0),
             'maxCpc': 50000,
             'type': 'Keyword',
             'id': 1 }
keyword2 = { 'status': 'Moderate',
             'adGroupId': 197497504,
             'destinationURL': None,
             'language': '',
             'text': 'yes it does not',
             'negative': bool(0),
             'maxCpc': 50000,
             'type': 'Keyword',
             'id': 2 }
keylist = [keyword1, keyword2]
# Check that the data goes through properly
retval = server.echo_simple(adgroupid, keylist)
kw1 = retval[1][0]
kw2 = retval[1][1]
assert(retval[0] == adgroupid)
for key in kw1.keys():
    assert(kw1[key]==keyword1[key])
for key in kw2.keys():
    assert(kw2[key]==keyword2[key])
# Check that the header is preserved
retval = server.echo_header((adgroupid, keylist))
assert(retval[1].has_key('useragent'))
assert(retval[1]['useragent'] == 'foo')
server.quit()
print "Success!"
| Python |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: alanbushTest.py,v 1.5 2003/05/21 14:52:37 warnes Exp $'
import os, re,sys
# add local SOAPpy code to search path
sys.path.insert(1, "..")
from SOAPpy import *
Config.debug=0
# Check for a web proxy definition in the environment.
# Fix: only catch the two expected failure modes instead of a bare
# except (which would also swallow KeyboardInterrupt/SystemExit):
# KeyError when http_proxy is unset, AttributeError when it does not
# match "http://host:port" (re.search returns None).
try:
    proxy_url = os.environ['http_proxy']
    phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1, 2)
    proxy = "%s:%s" % (phost, pport)
except (KeyError, AttributeError):
    proxy = None
# Alan Bush Trust compositions service: list the available categories.
SoapEndpointURL = 'http://www.alanbushtrust.org.uk/soap/compositions.asp'
MethodNamespaceURI = 'urn:alanbushtrust-org-uk:soap.methods'
SoapAction = MethodNamespaceURI + ".GetCategories"
server = SOAPProxy(SoapEndpointURL,
                   namespace=MethodNamespaceURI,
                   soapaction=SoapAction,
                   http_proxy=proxy
                   )
for category in server.GetCategories():
    print category
| Python |
#!/usr/bin/env python
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Drive a remote Excel instance through a SOAP-to-COM bridge:
# make it visible, open a workbook, delete a row, then shut down.
server = SOAPProxy("http://206.135.217.234:8000/")
server.COM_SetProperty("Visible", 1)
server.Workbooks.Open("c:\\test.xls")
server.COM_NestedCall('ActiveSheet.Range("A2").EntireRow.Delete()')
server.quit()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.