code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
'''
Created on Apr 23, 2011
@author: Dima
'''
import bingapi
N = 720000000L
#This Function returns number of results of keyword
#Using Bing Search Engine
def bingSearchResults(keyword):
searchval= keyword
appId = '722A2CD216180D27F3F09631E65DE629692AC3C5'
bing = bingapi.Bing(appId)
tempJson = bing.do_web_search(searchval)
if (len(tempJson['SearchResponse']['Web'])==2):
return 0
totNum = tempJson['SearchResponse']['Web']['Total']
return long(totNum)
| [
[
8,
0,
0.1364,
0.2273,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2727,
0.0455,
0,
0.66,
0.5,
976,
0,
1,
0,
0,
976,
0,
0
],
[
2,
0,
0.6818,
0.4091,
0,
0.66,
... | [
"'''\nCreated on Apr 23, 2011\n\n@author: Dima\n'''",
"import bingapi",
"def bingSearchResults(keyword):\n searchval= keyword\n appId = '722A2CD216180D27F3F09631E65DE629692AC3C5'\n bing = bingapi.Bing(appId) \n tempJson = bing.do_web_search(searchval)\n if (len(tempJson['Sear... |
'''
Created on 01/05/2011
@author: Eran_Z
'''
| [
[
8,
0,
0.6,
1,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
]
] | [
"'''\nCreated on 01/05/2011\n\n@author: Eran_Z\n'''"
] |
'''
Created on 08/04/2011
@author: Eran_Z
Google search (num results), based on Dima's implementation
currently uses deprecated API
'''
import json
import urllib
#N = 25270000000L #25.27 billion, roughly google's index size. Should be reduced for other engines.
N = 1870000000L #roughly the index of the deprecated API
def googleSearchResults(searchfor):
query = urllib.urlencode({'q': searchfor})
url = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&%s'%query
search_response = urllib.urlopen(url)
search_results = search_response.read()
results = json.loads(search_results)
data = results['responseData']
ret = data['cursor']['estimatedResultCount']
return long(ret)
| [
[
8,
0,
0.18,
0.32,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4,
0.04,
0,
0.66,
0.3333,
463,
0,
1,
0,
0,
463,
0,
0
],
[
1,
0,
0.44,
0.04,
0,
0.66,
0.6667,... | [
"'''\nCreated on 08/04/2011\n\n@author: Eran_Z\n\nGoogle search (num results), based on Dima's implementation\ncurrently uses deprecated API\n'''",
"import json",
"import urllib",
"def googleSearchResults(searchfor):\n query = urllib.urlencode({'q': searchfor})\n \n url = 'http://ajax.googleapis.com/... |
import urllib
try:
import json
except ImportError:
import simplejson as json
import logging
class Boss(object):
def __init__(self, appid, loglevel=logging.INFO):
self.appid = appid
self.log_filename = 'log.log'
logging.basicConfig(level=loglevel,
format='%(asctime)s %(name)-6s %(levelname)-8s %(message)s',
filename=self.log_filename)
def talk_to_yahoo(self, type_, query, **kwargs):
logging.info('Query:%s'%query)
logging.info('type_:%s'%type_)
logging.info('Other Args:%s'%kwargs)
base_url = 'http://boss.yahooapis.com/ysearch/%s/v1/%s?%s'
kwargs['appid'] = self.appid
payload = urllib.urlencode(kwargs)
final_url = base_url%(type_, query, payload)
logging.info('final_url: %s'%final_url)
response=urllib.urlopen(final_url)
data=json.load(response)
logging.info('data:%s'%data)
return data
def do_web_search(self, query, **kwargs):
return self.talk_to_yahoo('web', query, **kwargs)
def do_news_search(self, query, **kwargs):
return self.talk_to_yahoo('news', query, **kwargs)
def do_spelling_search(self, query, **kwargs):
return self.talk_to_yahoo('spelling', query, **kwargs)
def do_images_search(self, query, **kwargs):
return self.talk_to_yahoo('images', query, **kwargs)
def do_siteexplorer_search(self, query, **kwargs):
return self.talk_to_yahoo('se_inlink', query, **kwargs) | [
[
1,
0,
0.0233,
0.0233,
0,
0.66,
0,
614,
0,
1,
0,
0,
614,
0,
0
],
[
7,
0,
0.0814,
0.093,
0,
0.66,
0.3333,
0,
0,
1,
0,
0,
0,
0,
0
],
[
1,
1,
0.0698,
0.0233,
1,
0.3,
... | [
"import urllib",
"try:\n import json\nexcept ImportError:\n import simplejson as json",
" import json",
" import simplejson as json",
"import logging",
"class Boss(object):\n def __init__(self, appid, loglevel=logging.INFO):\n self.appid = appid\n self.log_filename = 'log.log'... |
'''
Created on May 22, 2011
@author: Dima
'''
import bossapi
N = 720000000L
#This Function returns number of results of keyword
#Using Boss Search Engine
def bossSearchResults(keyword):
searchval= keyword
appId = 'NDK5RxPV34EeYsgrmer5dNvgfQaQYuU4VYT._GHo.nxdkl_2502aXf5DcFpIe0PdYQ--'
boss = bossapi.Boss(appId)
tempJson = boss.do_web_search(searchval)
if (len(tempJson['ysearchresponse']['totalhits'])==2):
return 0
totNum = tempJson['ysearchresponse']['totalhits']
return long(totNum)
| [
[
8,
0,
0.1429,
0.2381,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2857,
0.0476,
0,
0.66,
0.5,
756,
0,
1,
0,
0,
756,
0,
0
],
[
2,
0,
0.7143,
0.4286,
0,
0.66,
... | [
"'''\nCreated on May 22, 2011\n\n@author: Dima\n'''",
"import bossapi",
"def bossSearchResults(keyword):\n searchval= keyword\n appId = 'NDK5RxPV34EeYsgrmer5dNvgfQaQYuU4VYT._GHo.nxdkl_2502aXf5DcFpIe0PdYQ--'\n boss = bossapi.Boss(appId)\n tempJson = boss.do_web_search(searchval)\n if (len(tempJson... |
'''
Created on 22/05/2011
@author: Dima
'''
| [
[
8,
0,
0.6,
1,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
]
] | [
"'''\nCreated on 22/05/2011\n\n@author: Dima\n'''"
] |
'''
Created on 01/05/2011
@author: Eran_Z
'''
| [
[
8,
0,
0.6,
1,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
]
] | [
"'''\nCreated on 01/05/2011\n\n@author: Eran_Z\n'''"
] |
'''
Created on 06/05/2011
@author: Eran_Z
populates the cache according to the input files
'''
from RSM.algorithm import imdb_m
from RSM.my_movies.models import Movie, SearchResult
from django.shortcuts import render_to_response
from RSM.util import printDebug, isLegalName
import string
import imdb
import os
from RSM.settings import SITE_ROOT
#from RSM.algorithm.search.bingSearch.bingSearch import bingSearchResults
from RSM.algorithm.search.bossSearch.bossSearch import bossSearchResults
#from RSM.algorithm.search.deprecatedGoogleSearch.deprecatedGoogleSearch import googleSearchResults
# either googleSearchResults or bingSearchResults
# and switch between the 2 imports above.
searchEngine = bossSearchResults
basePath = os.path.join(SITE_ROOT, 'algorithm/search/')
singlesFile = basePath + "best250.txt"
pairsFile = basePath + "best250pairs.txt"
def clearCache(part):
if not part or part == 2:
SearchResult.objects.all().delete()
if not part or part == 1:
Movie.objects.all().delete()
def populateCache(testMode, part=0):
clearCache(part)
printDebug('cache cleared')
with open(singlesFile, "r") as f:
lines = f.readlines()
numLines = len(lines)
if not part or part == 1:
strOfNumLines = ' of ' + str(numLines)
i=0
for line in lines:
i+=1
if(not testMode):
printDebug(str(i) + strOfNumLines)
lineList = line.split('\t')
if (len(lineList) == 2):
if(testMode):
(link, year) = ('', 1900)
else:
(link, year) = imdb_m.getLinkAndYear(lineList[0])
Movie(title=lineList[0], year=year,
searchSingle=lineList[1], link=link).save()
elif (len(lineList)):
printDebug("ERROR: malformed single searches file. line: " + line)
printDebug('done singles')
if not part or part == 2:
with open(pairsFile, "r") as f:
i=1
for line in f.xreadlines():
if(not testMode):
printDebug(str(i) + ', ' + str(i+1) + ' of ' + str(numLines * (numLines - 1)))
i+=2
lineList = line.split('\t')
if (len(lineList) == 3):
m1 = Movie.objects.get(title=lineList[0])
m2 = Movie.objects.get(title=lineList[1])
SearchResult(movie1=m1, movie2=m2, numResults=lineList[2]).save()
SearchResult(movie1=m2, movie2=m1, numResults=lineList[2]).save()
elif (len(lineList)):
printDebug("ERROR: malformed double searches file. line: " + line)
printDebug('done doubles')
def makeCache(request):
printDebug('populateCache started')
part = request.GET.get("part")
if part:
printDebug('only running part ' + part)
populateCache(False, int(part))
else:
populateCache(False)
printDebug('populateCache ended')
return render_to_response('recommendation/doneReset.html', {'func': 'resetCache'})
def makeTestCache():
printDebug('populateTestCache started')
print os.getcwd()
global singlesFile
global pairsFile
singlesFile = basePath + "best250Test.txt"
pairsFile = basePath + "best250pairsTest.txt"
print "changed file paths" + singlesFile + " &" + pairsFile
populateCache(True)
singlesFile = basePath + "best250.txt"
pairsFile = basePath + "best250pairs.txt"
print "restored file paths" + singlesFile + " &" + pairsFile
printDebug('populateTestCache ended')
def filter250(topMovies):
"""
returns a list that has all the names of movies from topMovies which are isLegalName.
"""
res = []
for m in topMovies:
name = m['title']
if isLegalName(name):
name = string.replace(name,'?','')
res.append(name)
return res
def createBest250SearchPairs(topMovies):
"""
This Function Creates file named best250Pairs.txt with pairs of best 250 movies names
and their search results number
"""
with open(pairsFile, "w") as f:
numMovies = len(topMovies)
strOfAll = ' of ' + str(numMovies * (numMovies - 1) / 2)
tot=0
for i in range(numMovies):
for j in range(i+1,numMovies):
tot += 1
printDebug(str(tot) + strOfAll)
find_name = u'"%s" "%s"' % (topMovies[i],topMovies[j])
res = searchEngine(find_name)
if not res:
res = 1
out = u'%s\t%s\t%d\n' % (topMovies[i],topMovies[j],res)
f.write(out)
f.flush()
def create250BestSearches(topMovies):
"""
This Function Creates file named best250.txt with best 250 movies names
and their search results number
"""
with open(singlesFile, "w") as f:
numMovies = len(topMovies)
strOfNumMovies = ' of ' + str(numMovies)
i=0
for m in topMovies:
i+=1
printDebug(str(i) + strOfNumMovies)
res = searchEngine("\""+m+"\"")
if not res:
res = 1
out = u'%s\t%d\n' % (m,res)
f.write(out)
f.flush()
def createBestMoviesFiles(request):
i = imdb.IMDb()
printDebug('CreateBestMoviesFiles started')
topMovies = i.get_top250_movies() # if we want less: [:n]
topMovies = filter250(topMovies)
printDebug('topMovies list generated')
printDebug('creating singles file')
create250BestSearches(topMovies)
printDebug('creating doubles file')
createBest250SearchPairs(topMovies)
printDebug('CreateBestMoviesFiles finished')
return render_to_response('recommendation/doneReset.html', {'func': 'resetCacheFiles'})
| [
[
8,
0,
0.0235,
0.0412,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0529,
0.0059,
0,
0.66,
0.0476,
17,
0,
1,
0,
0,
17,
0,
0
],
[
1,
0,
0.0588,
0.0059,
0,
0.66,
... | [
"'''\nCreated on 06/05/2011\n\n@author: Eran_Z\n\npopulates the cache according to the input files \n'''",
"from RSM.algorithm import imdb_m",
"from RSM.my_movies.models import Movie, SearchResult",
"from django.shortcuts import render_to_response",
"from RSM.util import printDebug, isLegalName",
"import ... |
'''
Created on 29/03/2011
@author: Eran_Z
Searching
'''
from RSM.my_movies.models import Movie, SearchResult
def searchSingle(term):
"""Returns number of hits for given term."""
return getattr(Movie.objects.get(title=term),'searchSingle')
def searchTogether(term1, term2):
"""Returns number of hits for 2 given terms together."""
if term1 == term2:
return searchSingle(term1)
m1 = Movie.objects.get(title=term1)
m2 = Movie.objects.get(title=term2)
return getattr(SearchResult.objects.get(movie1=m1,movie2=m2), 'numResults')
#------------- UNUSED EXTENSIONS --------------------
from math import log
from search.bingSearch import bingSearch
def __search(searchStr):
"""Searches given search string on the web.
Returns number of hits."""
return bingSearch.bingSearchResults(searchStr)
def searchExclusion(term, Ex):
"""Returns number of hits for given term,
excluding pages containing terms from the given exclusion group."""
#Note: this shouldn't be used with the current search options
searchStr = "\"" + term + "\""
for str in Ex:
searchStr += " -\"" + str + "\""
return __search(searchStr)
def NGD(x,y):
logx = log(searchSingle(x))
logy = log(searchSingle(y))
logxy = log(searchTogether(x,y))
logN = log(bingSearch.N)
return (max(logx, logy) - logxy) / (logN - min(logx, logy))
| [
[
8,
0,
0.0769,
0.1346,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1731,
0.0192,
0,
0.66,
0.125,
873,
0,
2,
0,
0,
873,
0,
0
],
[
2,
0,
0.2308,
0.0577,
0,
0.66,... | [
"'''\nCreated on 29/03/2011\n\n@author: Eran_Z\n\nSearching\n'''",
"from RSM.my_movies.models import Movie, SearchResult",
"def searchSingle(term):\n \"\"\"Returns number of hits for given term.\"\"\"\n return getattr(Movie.objects.get(title=term),'searchSingle')",
" \"\"\"Returns number of hits for ... |
'''
Created on 27/03/2011
@author: Eran_Z
Feasibility study (main)
'''
import weights_m
import scores_m
def generateWeights(algorithm, context, world):
"""Generates a list of weights for the context items.
This is a list of numbers (preferrably positive)"""
return algorithm(context, world)
def calculateScores(algorithm, context, weights, world):
"""Calculates the scores for each of the world items.
In the future, it will probably use many internet searches."""
return algorithm(context, weights, world)
def sortWorld(world, scores):
"""Sorts the world and scores lists according to the scores list,
from largest to smallest."""
combined = [(world[i],scores[i]) for i in range(len(world))]
combined = sorted(combined, key=lambda t: t[1], reverse=True)
return ([w[0] for w in combined], [s[1] for s in combined])
########################################################
########################################################
# MAIN function:
def COR_algorithm(weightingAlgorithm, scoringAlgorithm, context, world):
#get settings
chosenWeightingAlg = weights_m.weightingAlgorithms[weightingAlgorithm]
chosenScoringAlg = scores_m.scoringAlgorithms[scoringAlgorithm]
#First stage: generate weights
weights = generateWeights(chosenWeightingAlg, context, world)
#Second stage: calculate scores
scores = calculateScores(chosenScoringAlg, context, weights, world)
#sort world according to scores
(world, scores) = sortWorld(world, scores)
#return results
return world
########################################################
# example invocation of the algorithm:
#context = ["Aladdin", "Cinderella", "Snow White"]
#world = ["the Exorcist", "Sex and the city", "Toy Story"]
#COR_algorithm("Mutual Information", "Normalized Mutual Information", context, world)
| [
[
8,
0,
0.0727,
0.1273,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1636,
0.0182,
0,
0.66,
0.1667,
760,
0,
1,
0,
0,
760,
0,
0
],
[
1,
0,
0.1818,
0.0182,
0,
0.66... | [
"'''\nCreated on 27/03/2011\n\n@author: Eran_Z\n\nFeasibility study (main)\n'''",
"import weights_m",
"import scores_m",
"def generateWeights(algorithm, context, world):\n \"\"\"Generates a list of weights for the context items.\n This is a list of numbers (preferrably positive)\"\"\"\n return algori... |
#!/usr/bin/env python
"""
generatepot.py script.
This script generates the imdbpy.pot file, from the DTD.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import sys
from datetime import datetime as dt
DEFAULT_MESSAGES = { }
ELEMENT_PATTERN = r"""<!ELEMENT\s+([^\s]+)"""
re_element = re.compile(ELEMENT_PATTERN)
POT_HEADER_TEMPLATE = r"""# Gettext message file for imdbpy
msgid ""
msgstr ""
"Project-Id-Version: imdbpy\n"
"POT-Creation-Date: %(now)s\n"
"PO-Revision-Date: YYYY-MM-DD HH:MM+0000\n"
"Last-Translator: YOUR NAME <YOUR@EMAIL>\n"
"Language-Team: TEAM NAME <TEAM@EMAIL>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Language-Code: en\n"
"Language-Name: English\n"
"Preferred-Encodings: utf-8\n"
"Domain: imdbpy\n"
"""
if len(sys.argv) != 2:
print "Usage: %s dtd_file" % sys.argv[0]
sys.exit()
dtdfilename = sys.argv[1]
dtd = open(dtdfilename).read()
elements = re_element.findall(dtd)
uniq = set(elements)
elements = list(uniq)
print POT_HEADER_TEMPLATE % {
'now': dt.strftime(dt.now(), "%Y-%m-%d %H:%M+0000")
}
for element in sorted(elements):
if element in DEFAULT_MESSAGES:
print '# Default: %s' % DEFAULT_MESSAGES[element]
else:
print '# Default: %s' % element.replace('-', ' ').capitalize()
print 'msgid "%s"' % element
print 'msgstr ""'
# use this part instead of the line above to generate the po file for English
#if element in DEFAULT_MESSAGES:
# print 'msgstr "%s"' % DEFAULT_MESSAGES[element]
#else:
# print 'msgstr "%s"' % element.replace('-', ' ').capitalize()
print
| [
[
1,
0,
0.2,
0.2,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.4,
0.2,
0,
0.66,
0.5,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.8,
0.2,
0,
0.66,
1,
426,... | [
"import re",
"import sys",
"from datetime import datetime as dt"
] |
#!/usr/bin/env python
"""
generatepot.py script.
This script generates the imdbpy.pot file, from the DTD.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import sys
from datetime import datetime as dt
DEFAULT_MESSAGES = { }
ELEMENT_PATTERN = r"""<!ELEMENT\s+([^\s]+)"""
re_element = re.compile(ELEMENT_PATTERN)
POT_HEADER_TEMPLATE = r"""# Gettext message file for imdbpy
msgid ""
msgstr ""
"Project-Id-Version: imdbpy\n"
"POT-Creation-Date: %(now)s\n"
"PO-Revision-Date: YYYY-MM-DD HH:MM+0000\n"
"Last-Translator: YOUR NAME <YOUR@EMAIL>\n"
"Language-Team: TEAM NAME <TEAM@EMAIL>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Language-Code: en\n"
"Language-Name: English\n"
"Preferred-Encodings: utf-8\n"
"Domain: imdbpy\n"
"""
if len(sys.argv) != 2:
print "Usage: %s dtd_file" % sys.argv[0]
sys.exit()
dtdfilename = sys.argv[1]
dtd = open(dtdfilename).read()
elements = re_element.findall(dtd)
uniq = set(elements)
elements = list(uniq)
print POT_HEADER_TEMPLATE % {
'now': dt.strftime(dt.now(), "%Y-%m-%d %H:%M+0000")
}
for element in sorted(elements):
if element in DEFAULT_MESSAGES:
print '# Default: %s' % DEFAULT_MESSAGES[element]
else:
print '# Default: %s' % element.replace('-', ' ').capitalize()
print 'msgid "%s"' % element
print 'msgstr ""'
# use this part instead of the line above to generate the po file for English
#if element in DEFAULT_MESSAGES:
# print 'msgstr "%s"' % DEFAULT_MESSAGES[element]
#else:
# print 'msgstr "%s"' % element.replace('-', ' ').capitalize()
print
| [
[
1,
0,
0.2,
0.2,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.4,
0.2,
0,
0.66,
0.5,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.8,
0.2,
0,
0.66,
1,
426,... | [
"import re",
"import sys",
"from datetime import datetime as dt"
] |
#!/usr/bin/env python
"""
rebuildmo.py script.
This script builds the .mo files, from the .po files.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import glob
import msgfmt
import os
#LOCALE_DIR = os.path.dirname(__file__)
def rebuildmo():
lang_glob = 'imdbpy-*.po'
created = []
for input_file in glob.glob(lang_glob):
lang = input_file[7:-3]
if not os.path.exists(lang):
os.mkdir(lang)
mo_dir = os.path.join(lang, 'LC_MESSAGES')
if not os.path.exists(mo_dir):
os.mkdir(mo_dir)
output_file = os.path.join(mo_dir, 'imdbpy.mo')
msgfmt.make(input_file, output_file)
created.append(lang)
return created
if __name__ == '__main__':
languages = rebuildmo()
print 'Created locale for: %s.' % ' '.join(languages)
| [
[
8,
0,
0.2449,
0.4286,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4898,
0.0204,
0,
0.66,
0.2,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.5102,
0.0204,
0,
0.66,
... | [
"\"\"\"\nrebuildmo.py script.\n\nThis script builds the .mo files, from the .po files.\n\nCopyright 2009 H. Turgut Uyar <uyar@tekir.org>\n\nThis program is free software; you can redistribute it and/or modify",
"import glob",
"import msgfmt",
"import os",
"def rebuildmo():\n lang_glob = 'imdbpy-*.po'\n ... |
#!/usr/bin/env python
"""
rebuildmo.py script.
This script builds the .mo files, from the .po files.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import glob
import msgfmt
import os
#LOCALE_DIR = os.path.dirname(__file__)
def rebuildmo():
lang_glob = 'imdbpy-*.po'
created = []
for input_file in glob.glob(lang_glob):
lang = input_file[7:-3]
if not os.path.exists(lang):
os.mkdir(lang)
mo_dir = os.path.join(lang, 'LC_MESSAGES')
if not os.path.exists(mo_dir):
os.mkdir(mo_dir)
output_file = os.path.join(mo_dir, 'imdbpy.mo')
msgfmt.make(input_file, output_file)
created.append(lang)
return created
if __name__ == '__main__':
languages = rebuildmo()
print 'Created locale for: %s.' % ' '.join(languages)
| [
[
8,
0,
0.2449,
0.4286,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4898,
0.0204,
0,
0.66,
0.2,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.5102,
0.0204,
0,
0.66,
... | [
"\"\"\"\nrebuildmo.py script.\n\nThis script builds the .mo files, from the .po files.\n\nCopyright 2009 H. Turgut Uyar <uyar@tekir.org>\n\nThis program is free software; you can redistribute it and/or modify",
"import glob",
"import msgfmt",
"import os",
"def rebuildmo():\n lang_glob = 'imdbpy-*.po'\n ... |
"""
locale package (imdb package).
This package provides scripts and files for internationalization
of IMDbPY.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import gettext
import os
LOCALE_DIR = os.path.dirname(__file__)
gettext.bindtextdomain('imdbpy', LOCALE_DIR)
| [
[
8,
0,
0.3966,
0.7586,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.8276,
0.0345,
0,
0.66,
0.25,
723,
0,
1,
0,
0,
723,
0,
0
],
[
1,
0,
0.8621,
0.0345,
0,
0.66,
... | [
"\"\"\"\nlocale package (imdb package).\n\nThis package provides scripts and files for internationalization\nof IMDbPY.\n\nCopyright 2009 H. Turgut Uyar <uyar@tekir.org>",
"import gettext",
"import os",
"LOCALE_DIR = os.path.dirname(__file__)",
"gettext.bindtextdomain('imdbpy', LOCALE_DIR)"
] |
"""
Movie module (imdb package).
This module provides the Movie class, used to store information about
a given movie.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb import articles
from imdb.utils import analyze_title, build_title, canonicalTitle, \
flatten, _Container, cmpMovies
class Movie(_Container):
"""A Movie.
Every information about a movie can be accessed as:
movieObject['information']
to get a list of the kind of information stored in a
Movie object, use the keys() method; some useful aliases
are defined (as "casting" for the "casting director" key); see
the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'plot')
# Aliases for some not-so-intuitive keys.
keys_alias = {
'tv schedule': 'airing',
'user rating': 'rating',
'plot summary': 'plot',
'plot summaries': 'plot',
'directed by': 'director',
'created by': 'creator',
'writing credits': 'writer',
'produced by': 'producer',
'original music by': 'original music',
'non-original music by': 'non-original music',
'music': 'original music',
'cinematography by': 'cinematographer',
'cinematography': 'cinematographer',
'film editing by': 'editor',
'film editing': 'editor',
'editing': 'editor',
'actors': 'cast',
'actresses': 'cast',
'casting by': 'casting director',
'casting': 'casting director',
'art direction by': 'art direction',
'set decoration by': 'set decoration',
'costume design by': 'costume designer',
'costume design': 'costume designer',
'makeup department': 'make up',
'makeup': 'make up',
'make-up': 'make up',
'production management': 'production manager',
'production company': 'production companies',
'second unit director or assistant director':
'assistant director',
'second unit director': 'assistant director',
'sound department': 'sound crew',
'costume and wardrobe department': 'costume department',
'special effects by': 'special effects',
'visual effects by': 'visual effects',
'special effects company': 'special effects companies',
'stunts': 'stunt performer',
'other crew': 'miscellaneous crew',
'misc crew': 'miscellaneous crew',
'miscellaneouscrew': 'miscellaneous crew',
'crewmembers': 'miscellaneous crew',
'crew members': 'miscellaneous crew',
'other companies': 'miscellaneous companies',
'misc companies': 'miscellaneous companies',
'miscellaneous company': 'miscellaneous companies',
'misc company': 'miscellaneous companies',
'other company': 'miscellaneous companies',
'aka': 'akas',
'also known as': 'akas',
'country': 'countries',
'production country': 'countries',
'production countries': 'countries',
'genre': 'genres',
'runtime': 'runtimes',
'lang': 'languages',
'color': 'color info',
'cover': 'cover url',
'full-size cover': 'full-size cover url',
'seasons': 'number of seasons',
'language': 'languages',
'certificate': 'certificates',
'certifications': 'certificates',
'certification': 'certificates',
'miscellaneous links': 'misc links',
'miscellaneous': 'misc links',
'soundclips': 'sound clips',
'videoclips': 'video clips',
'photographs': 'photo sites',
'distributor': 'distributors',
'distribution': 'distributors',
'distribution companies': 'distributors',
'distribution company': 'distributors',
'guest': 'guests',
'guest appearances': 'guests',
'tv guests': 'guests',
'notable tv guest appearances': 'guests',
'episodes cast': 'guests',
'episodes number': 'number of episodes',
'amazon review': 'amazon reviews',
'merchandising': 'merchandising links',
'merchandise': 'merchandising links',
'sales': 'merchandising links',
'faq': 'faqs',
'parental guide': 'parents guide',
'frequently asked questions': 'faqs'}
keys_tomodify_list = ('plot', 'trivia', 'alternate versions', 'goofs',
'quotes', 'dvd', 'laserdisc', 'news', 'soundtrack',
'crazy credits', 'business', 'supplements',
'video review', 'faqs')
cmpFunct = cmpMovies
def _init(self, **kwds):
"""Initialize a Movie object.
*movieID* -- the unique identifier for the movie.
*title* -- the title of the Movie, if not in the data dictionary.
*myTitle* -- your personal title for the movie.
*myID* -- your personal identifier for the movie.
*data* -- a dictionary used to initialize the object.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically build.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*notes* -- notes for the person referred in the currentRole
attribute; e.g.: '(voice)'.
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
*modFunct* -- function called returning text fields.
"""
title = kwds.get('title')
if title and not self.data.has_key('title'):
self.set_title(title)
self.movieID = kwds.get('movieID', None)
self.myTitle = kwds.get('myTitle', u'')
def _reset(self):
"""Reset the Movie object."""
self.movieID = None
self.myTitle = u''
def set_title(self, title):
"""Set the title of the movie."""
# XXX: convert title to unicode, if it's a plain string?
d_title = analyze_title(title)
self.data.update(d_title)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('title'):
addkeys += ['canonical title', 'long imdb title',
'long imdb canonical title',
'smart canonical title',
'smart long imdb canonical title']
if self.data.has_key('episode of'):
addkeys += ['long imdb episode title', 'series title',
'canonical series title', 'episode title',
'canonical episode title',
'smart canonical series title',
'smart canonical episode title']
if self.data.has_key('cover url'):
addkeys += ['full-size cover url']
return addkeys
def guessLanguage(self):
"""Guess the language of the title of this movie; returns None
if there are no hints."""
lang = self.get('languages')
if lang:
lang = lang[0]
else:
country = self.get('countries')
if country:
lang = articles.COUNTRY_LANG.get(country[0])
return lang
def smartCanonicalTitle(self, title=None, lang=None):
"""Return the canonical title, guessing its language.
The title can be forces with the 'title' argument (internally
used) and the language can be forced with the 'lang' argument,
otherwise it's auto-detected."""
if title is None:
title = self.data.get('title', u'')
if lang is None:
lang = self.guessLanguage()
return canonicalTitle(title, lang=lang)
def _getitem(self, key):
"""Handle special keys."""
if self.data.has_key('episode of'):
if key == 'long imdb episode title':
return build_title(self.data)
elif key == 'series title':
return self.data['episode of']['title']
elif key == 'canonical series title':
ser_title = self.data['episode of']['title']
return canonicalTitle(ser_title)
elif key == 'smart canonical series title':
ser_title = self.data['episode of']['title']
return self.smartCanonicalTitle(ser_title)
elif key == 'episode title':
return self.data.get('title', u'')
elif key == 'canonical episode title':
return canonicalTitle(self.data.get('title', u''))
elif key == 'smart canonical episode title':
return self.smartCanonicalTitle(self.data.get('title', u''))
if self.data.has_key('title'):
if key == 'title':
return self.data['title']
elif key == 'long imdb title':
return build_title(self.data)
elif key == 'canonical title':
return canonicalTitle(self.data['title'])
elif key == 'smart canonical title':
return self.smartCanonicalTitle(self.data['title'])
elif key == 'long imdb canonical title':
return build_title(self.data, canonical=1)
elif key == 'smart long imdb canonical title':
return build_title(self.data, canonical=1,
lang=self.guessLanguage())
if key == 'full-size cover url' and self.data.has_key('cover url'):
return self._re_fullsizeURL.sub('', self.data.get('cover url', ''))
return None
def getID(self):
"""Return the movieID."""
return self.movieID
def __nonzero__(self):
"""The Movie is "false" if the self.data does not contain a title."""
# XXX: check the title and the movieID?
if self.data.has_key('title'): return 1
return 0
def isSameTitle(self, other):
"""Return true if this and the compared object have the same
long imdb title and/or movieID.
"""
# XXX: obsolete?
if not isinstance(other, self.__class__): return 0
if self.data.has_key('title') and \
other.data.has_key('title') and \
build_title(self.data, canonical=0) == \
build_title(other.data, canonical=0):
return 1
if self.accessSystem == other.accessSystem and \
self.movieID is not None and self.movieID == other.movieID:
return 1
return 0
isSameMovie = isSameTitle # XXX: just for backward compatiblity.
def __contains__(self, item):
"""Return true if the given Person object is listed in this Movie,
or if the the given Character is represented in this Movie."""
from Person import Person
from Character import Character
from Company import Company
if isinstance(item, Person):
for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
toDescend=(list, dict, tuple, Movie)):
if item.isSame(p):
return 1
elif isinstance(item, Character):
for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
toDescend=(list, dict, tuple, Movie)):
if item.isSame(p.currentRole):
return 1
elif isinstance(item, Company):
for c in flatten(self.data, yieldDictKeys=1, scalar=Company,
toDescend=(list, dict, tuple, Movie)):
if item.isSame(c):
return 1
return 0
def __deepcopy__(self, memo):
"""Return a deep copy of a Movie instance."""
m = Movie(title=u'', movieID=self.movieID, myTitle=self.myTitle,
myID=self.myID, data=deepcopy(self.data, memo),
currentRole=deepcopy(self.currentRole, memo),
roleIsPerson=self._roleIsPerson,
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
m.current_info = list(self.current_info)
m.set_mod_funct(self.modFunct)
return m
def __repr__(self):
"""String representation of a Movie object."""
# XXX: add also currentRole and notes, if present?
if self.has_key('long imdb episode title'):
title = self.get('long imdb episode title')
else:
title = self.get('long imdb title')
r = '<Movie id:%s[%s] title:_%s_>' % (self.movieID, self.accessSystem,
title)
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short title."""
return self.get('title', u'').encode('utf_8', 'replace')
def __unicode__(self):
"""Simply print the short title."""
return self.get('title', u'')
def summary(self):
"""Return a string with a pretty-printed summary for the movie."""
if not self: return u''
def _nameAndRole(personList, joiner=u', '):
"""Build a pretty string with name and role."""
nl = []
for person in personList:
n = person.get('name', u'')
if person.currentRole: n += u' (%s)' % person.currentRole
nl.append(n)
return joiner.join(nl)
s = u'Movie\n=====\nTitle: %s\n' % \
self.get('long imdb canonical title', u'')
genres = self.get('genres')
if genres: s += u'Genres: %s.\n' % u', '.join(genres)
director = self.get('director')
if director:
s += u'Director: %s.\n' % _nameAndRole(director)
writer = self.get('writer')
if writer:
s += u'Writer: %s.\n' % _nameAndRole(writer)
cast = self.get('cast')
if cast:
cast = cast[:5]
s += u'Cast: %s.\n' % _nameAndRole(cast)
runtime = self.get('runtimes')
if runtime:
s += u'Runtime: %s.\n' % u', '.join(runtime)
countries = self.get('countries')
if countries:
s += u'Country: %s.\n' % u', '.join(countries)
lang = self.get('languages')
if lang:
s += u'Language: %s.\n' % u', '.join(lang)
rating = self.get('rating')
if rating:
s += u'Rating: %s' % rating
nr_votes = self.get('votes')
if nr_votes:
s += u' (%s votes)' % nr_votes
s += u'.\n'
plot = self.get('plot')
if not plot:
plot = self.get('plot summary')
if plot:
plot = [plot]
if plot:
plot = plot[0]
i = plot.find('::')
if i != -1:
plot = plot[:i]
s += u'Plot: %s' % plot
return s
| [
[
8,
0,
0.0289,
0.0553,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0603,
0.0025,
0,
0.66,
0.25,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.0653,
0.0025,
0,
0.66,
... | [
"\"\"\"\nMovie module (imdb package).\n\nThis module provides the Movie class, used to store information about\na given movie.\n\nCopyright 2004-2010 Davide Alberani <da@erlug.linux.it>",
"from copy import deepcopy",
"from imdb import articles",
"from imdb.utils import analyze_title, build_title, canonicalTit... |
"""
Person module (imdb package).
This module provides the Person class, used to store information about
a given person.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_name, build_name, normalizeName, \
flatten, _Container, cmpPeople
class Person(_Container):
"""A Person.
Every information about a person can be accessed as:
personObject['information']
to get a list of the kind of information stored in a
Person object, use the keys() method; some useful aliases
are defined (as "biography" for the "mini biography" key);
see the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'filmography', 'biography')
# Aliases for some not-so-intuitive keys.
keys_alias = {'biography': 'mini biography',
'bio': 'mini biography',
'aka': 'akas',
'also known as': 'akas',
'nick name': 'nick names',
'nicks': 'nick names',
'nickname': 'nick names',
'miscellaneouscrew': 'miscellaneous crew',
'crewmembers': 'miscellaneous crew',
'misc': 'miscellaneous crew',
'guest': 'notable tv guest appearances',
'guests': 'notable tv guest appearances',
'tv guest': 'notable tv guest appearances',
'guest appearances': 'notable tv guest appearances',
'spouses': 'spouse',
'salary': 'salary history',
'salaries': 'salary history',
'otherworks': 'other works',
"maltin's biography":
"biography from leonard maltin's movie encyclopedia",
"leonard maltin's biography":
"biography from leonard maltin's movie encyclopedia",
'real name': 'birth name',
'where are they now': 'where now',
'personal quotes': 'quotes',
'mini-biography author': 'imdb mini-biography by',
'biography author': 'imdb mini-biography by',
'genre': 'genres',
'portrayed': 'portrayed in',
'keys': 'keywords',
'trademarks': 'trade mark',
'trade mark': 'trade mark',
'trade marks': 'trade mark',
'trademark': 'trade mark',
'pictorials': 'pictorial',
'magazine covers': 'magazine cover photo',
'magazine-covers': 'magazine cover photo',
'tv series episodes': 'episodes',
'tv-series episodes': 'episodes',
'articles': 'article',
'keyword': 'keywords'}
# 'nick names'???
keys_tomodify_list = ('mini biography', 'spouse', 'quotes', 'other works',
'salary history', 'trivia', 'trade mark', 'news',
'books', 'biographical movies', 'portrayed in',
'where now', 'interviews', 'article',
"biography from leonard maltin's movie encyclopedia")
cmpFunct = cmpPeople
def _init(self, **kwds):
"""Initialize a Person object.
*personID* -- the unique identifier for the person.
*name* -- the name of the Person, if not in the data dictionary.
*myName* -- the nickname you use for this person.
*myID* -- your personal id for this person.
*data* -- a dictionary used to initialize the object.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically build.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*notes* -- notes about the given person for a specific movie
or role (e.g.: the alias used in the movie credits).
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*modFunct* -- function called returning text fields.
*billingPos* -- position of this person in the credits list.
"""
name = kwds.get('name')
if name and not self.data.has_key('name'):
self.set_name(name)
self.personID = kwds.get('personID', None)
self.myName = kwds.get('myName', u'')
self.billingPos = kwds.get('billingPos', None)
def _reset(self):
"""Reset the Person object."""
self.personID = None
self.myName = u''
self.billingPos = None
def _clear(self):
"""Reset the dictionary."""
self.billingPos = None
def set_name(self, name):
"""Set the name of the person."""
# XXX: convert name to unicode, if it's a plain string?
d = analyze_name(name, canonical=1)
self.data.update(d)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('name'):
addkeys += ['canonical name', 'long imdb name',
'long imdb canonical name']
if self.data.has_key('headshot'):
addkeys += ['full-size headshot']
return addkeys
def _getitem(self, key):
"""Handle special keys."""
if self.data.has_key('name'):
if key == 'name':
return normalizeName(self.data['name'])
elif key == 'canonical name':
return self.data['name']
elif key == 'long imdb name':
return build_name(self.data, canonical=0)
elif key == 'long imdb canonical name':
return build_name(self.data)
if key == 'full-size headshot' and self.data.has_key('headshot'):
return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
return None
def getID(self):
"""Return the personID."""
return self.personID
def __nonzero__(self):
"""The Person is "false" if the self.data does not contain a name."""
# XXX: check the name and the personID?
if self.data.has_key('name'): return 1
return 0
def __contains__(self, item):
"""Return true if this Person has worked in the given Movie,
or if the fiven Character was played by this Person."""
from Movie import Movie
from Character import Character
if isinstance(item, Movie):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m):
return 1
elif isinstance(item, Character):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m.currentRole):
return 1
return 0
def isSameName(self, other):
"""Return true if two persons have the same name and imdbIndex
and/or personID.
"""
if not isinstance(other, self.__class__):
return 0
if self.data.has_key('name') and \
other.data.has_key('name') and \
build_name(self.data, canonical=1) == \
build_name(other.data, canonical=1):
return 1
if self.accessSystem == other.accessSystem and \
self.personID and self.personID == other.personID:
return 1
return 0
isSamePerson = isSameName # XXX: just for backward compatiblity.
def __deepcopy__(self, memo):
"""Return a deep copy of a Person instance."""
p = Person(name=u'', personID=self.personID, myName=self.myName,
myID=self.myID, data=deepcopy(self.data, memo),
currentRole=deepcopy(self.currentRole, memo),
roleIsPerson=self._roleIsPerson,
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
p.current_info = list(self.current_info)
p.set_mod_funct(self.modFunct)
p.billingPos = self.billingPos
return p
def __repr__(self):
"""String representation of a Person object."""
# XXX: add also currentRole and notes, if present?
r = '<Person id:%s[%s] name:_%s_>' % (self.personID, self.accessSystem,
self.get('long imdb canonical name'))
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short name."""
return self.get('name', u'').encode('utf_8', 'replace')
def __unicode__(self):
"""Simply print the short title."""
return self.get('name', u'')
def summary(self):
"""Return a string with a pretty-printed summary for the person."""
if not self: return u''
s = u'Person\n=====\nName: %s\n' % \
self.get('long imdb canonical name', u'')
bdate = self.get('birth date')
if bdate:
s += u'Birth date: %s' % bdate
bnotes = self.get('birth notes')
if bnotes:
s += u' (%s)' % bnotes
s += u'.\n'
ddate = self.get('death date')
if ddate:
s += u'Death date: %s' % ddate
dnotes = self.get('death notes')
if dnotes:
s += u' (%s)' % dnotes
s += u'.\n'
bio = self.get('mini biography')
if bio:
s += u'Biography: %s\n' % bio[0]
director = self.get('director')
if director:
d_list = [x.get('long imdb canonical title', u'')
for x in director[:3]]
s += u'Last movies directed: %s.\n' % u'; '.join(d_list)
act = self.get('actor') or self.get('actress')
if act:
a_list = [x.get('long imdb canonical title', u'')
for x in act[:5]]
s += u'Last movies acted: %s.\n' % u'; '.join(a_list)
return s
| [
[
8,
0,
0.0418,
0.08,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0873,
0.0036,
0,
0.66,
0.3333,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.0964,
0.0073,
0,
0.66,
... | [
"\"\"\"\nPerson module (imdb package).\n\nThis module provides the Person class, used to store information about\na given person.\n\nCopyright 2004-2010 Davide Alberani <da@erlug.linux.it>",
"from copy import deepcopy",
"from imdb.utils import analyze_name, build_name, normalizeName, \\\n ... |
"""
Character module (imdb package).
This module provides the Character class, used to store information about
a given character.
Copyright 2007-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_name, build_name, flatten, _Container, cmpPeople
class Character(_Container):
"""A Character.
Every information about a character can be accessed as:
characterObject['information']
to get a list of the kind of information stored in a
Character object, use the keys() method; some useful aliases
are defined (as "also known as" for the "akas" key);
see the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'filmography', 'biography')
# Aliases for some not-so-intuitive keys.
keys_alias = {'mini biography': 'biography',
'bio': 'biography',
'character biography': 'biography',
'character biographies': 'biography',
'biographies': 'biography',
'character bio': 'biography',
'aka': 'akas',
'also known as': 'akas',
'alternate names': 'akas',
'personal quotes': 'quotes',
'keys': 'keywords',
'keyword': 'keywords'}
keys_tomodify_list = ('biography', 'quotes')
cmpFunct = cmpPeople
def _init(self, **kwds):
"""Initialize a Character object.
*characterID* -- the unique identifier for the character.
*name* -- the name of the Character, if not in the data dictionary.
*myName* -- the nickname you use for this character.
*myID* -- your personal id for this character.
*data* -- a dictionary used to initialize the object.
*notes* -- notes about the given character.
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to characters.
*modFunct* -- function called returning text fields.
"""
name = kwds.get('name')
if name and not self.data.has_key('name'):
self.set_name(name)
self.characterID = kwds.get('characterID', None)
self.myName = kwds.get('myName', u'')
def _reset(self):
"""Reset the Character object."""
self.characterID = None
self.myName = u''
def set_name(self, name):
"""Set the name of the character."""
# XXX: convert name to unicode, if it's a plain string?
d = analyze_name(name, canonical=0)
self.data.update(d)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('name'):
addkeys += ['long imdb name']
if self.data.has_key('headshot'):
addkeys += ['full-size headshot']
return addkeys
def _getitem(self, key):
"""Handle special keys."""
## XXX: can a character have an imdbIndex?
if self.data.has_key('name'):
if key == 'long imdb name':
return build_name(self.data)
if key == 'full-size headshot' and self.data.has_key('headshot'):
return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
return None
def getID(self):
"""Return the characterID."""
return self.characterID
def __nonzero__(self):
"""The Character is "false" if the self.data does not contain a name."""
# XXX: check the name and the characterID?
if self.data.get('name'): return 1
return 0
def __contains__(self, item):
"""Return true if this Character was portrayed in the given Movie
or it was impersonated by the given Person."""
from Movie import Movie
from Person import Person
if isinstance(item, Person):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m.currentRole):
return 1
elif isinstance(item, Movie):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m):
return 1
return 0
def isSameName(self, other):
"""Return true if two character have the same name
and/or characterID."""
if not isinstance(other, self.__class__):
return 0
if self.data.has_key('name') and \
other.data.has_key('name') and \
build_name(self.data, canonical=0) == \
build_name(other.data, canonical=0):
return 1
if self.accessSystem == other.accessSystem and \
self.characterID is not None and \
self.characterID == other.characterID:
return 1
return 0
isSameCharacter = isSameName
def __deepcopy__(self, memo):
"""Return a deep copy of a Character instance."""
c = Character(name=u'', characterID=self.characterID,
myName=self.myName, myID=self.myID,
data=deepcopy(self.data, memo),
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
c.current_info = list(self.current_info)
c.set_mod_funct(self.modFunct)
return c
def __repr__(self):
"""String representation of a Character object."""
r = '<Character id:%s[%s] name:_%s_>' % (self.characterID,
self.accessSystem,
self.get('name'))
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short name."""
return self.get('name', u'').encode('utf_8', 'replace')
def __unicode__(self):
"""Simply print the short title."""
return self.get('name', u'')
def summary(self):
"""Return a string with a pretty-printed summary for the character."""
if not self: return u''
s = u'Character\n=====\nName: %s\n' % \
self.get('name', u'')
bio = self.get('biography')
if bio:
s += u'Biography: %s\n' % bio[0]
filmo = self.get('filmography')
if filmo:
a_list = [x.get('long imdb canonical title', u'')
for x in filmo[:5]]
s += u'Last movies with this character: %s.\n' % u'; '.join(a_list)
return s
| [
[
8,
0,
0.0584,
0.1117,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1218,
0.0051,
0,
0.66,
0.3333,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.132,
0.0051,
0,
0.66,... | [
"\"\"\"\nCharacter module (imdb package).\n\nThis module provides the Character class, used to store information about\na given character.\n\nCopyright 2007-2010 Davide Alberani <da@erlug.linux.it>",
"from copy import deepcopy",
"from imdb.utils import analyze_name, build_name, flatten, _Container, cmpPeople",
... |
#-*- encoding: utf-8 -*-
"""
parser.sql.dbschema module (imdb.parser.sql package).
This module provides the schema used to describe the layout of the
database used by the imdb.parser.sql package; functions to create/drop
tables and indexes are also provided.
Copyright 2005-2010 Davide Alberani <da@erlug.linux.it>
2006 Giuseppe "Cowo" Corbelli <cowo --> lugbs.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
_dbschema_logger = logging.getLogger('imdbpy.parser.sql.dbschema')
# Placeholders for column types.
INTCOL = 1
UNICODECOL = 2
STRINGCOL = 3
_strMap = {1: 'INTCOL', 2: 'UNICODECOL', 3: 'STRINGCOL'}
class DBCol(object):
"""Define column objects."""
def __init__(self, name, kind, **params):
self.name = name
self.kind = kind
self.index = None
self.indexLen = None
# If not None, two notations are accepted: 'TableName'
# and 'TableName.ColName'; in the first case, 'id' is assumed
# as the name of the pointed column.
self.foreignKey = None
if 'index' in params:
self.index = params['index']
del params['index']
if 'indexLen' in params:
self.indexLen = params['indexLen']
del params['indexLen']
if 'foreignKey' in params:
self.foreignKey = params['foreignKey']
del params['foreignKey']
self.params = params
def __str__(self):
"""Class representation."""
s = '<DBCol %s %s' % (self.name, _strMap[self.kind])
if self.index:
s += ' INDEX'
if self.indexLen:
s += '[:%d]' % self.indexLen
if self.foreignKey:
s += ' FOREIGN'
if 'default' in self.params:
val = self.params['default']
if val is not None:
val = '"%s"' % val
s += ' DEFAULT=%s' % val
for param in self.params:
if param == 'default': continue
s += ' %s' % param.upper()
s += '>'
return s
def __repr__(self):
"""Class representation."""
s = '<DBCol(name="%s", %s' % (self.name, _strMap[self.kind])
if self.index:
s += ', index="%s"' % self.index
if self.indexLen:
s += ', indexLen=%d' % self.indexLen
if self.foreignKey:
s += ', foreignKey="%s"' % self.foreignKey
for param in self.params:
val = self.params[param]
if isinstance(val, (unicode, str)):
val = u'"%s"' % val
s += ', %s=%s' % (param, val)
s += ')>'
return s
class DBTable(object):
"""Define table objects."""
def __init__(self, name, *cols, **kwds):
self.name = name
self.cols = cols
# Default values.
self.values = kwds.get('values', {})
def __str__(self):
"""Class representation."""
return '<DBTable %s (%d cols, %d values)>' % (self.name,
len(self.cols), sum([len(v) for v in self.values.values()]))
def __repr__(self):
"""Class representation."""
s = '<DBTable(name="%s"' % self.name
col_s = ', '.join([repr(col).rstrip('>').lstrip('<')
for col in self.cols])
if col_s:
s += ', %s' % col_s
if self.values:
s += ', values=%s' % self.values
s += ')>'
return s
# Default values to insert in some tables: {'column': (list, of, values, ...)}
kindTypeDefs = {'kind': ('movie', 'tv series', 'tv movie', 'video movie',
'tv mini series', 'video game', 'episode')}
companyTypeDefs = {'kind': ('distributors', 'production companies',
'special effects companies', 'miscellaneous companies')}
infoTypeDefs = {'info': ('runtimes', 'color info', 'genres', 'languages',
'certificates', 'sound mix', 'tech info', 'countries', 'taglines',
'keywords', 'alternate versions', 'crazy credits', 'goofs',
'soundtrack', 'quotes', 'release dates', 'trivia', 'locations',
'mini biography', 'birth notes', 'birth date', 'height',
'death date', 'spouse', 'other works', 'birth name',
'salary history', 'nick names', 'books', 'agent address',
'biographical movies', 'portrayed in', 'where now', 'trade mark',
'interviews', 'article', 'magazine cover photo', 'pictorial',
'death notes', 'LD disc format', 'LD year', 'LD digital sound',
'LD official retail price', 'LD frequency response', 'LD pressing plant',
'LD length', 'LD language', 'LD review', 'LD spaciality', 'LD release date',
'LD production country', 'LD contrast', 'LD color rendition',
'LD picture format', 'LD video noise', 'LD video artifacts',
'LD release country', 'LD sharpness', 'LD dynamic range',
'LD audio noise', 'LD color information', 'LD group genre',
'LD quality program', 'LD close captions-teletext-ld-g',
'LD category', 'LD analog left', 'LD certification',
'LD audio quality', 'LD video quality', 'LD aspect ratio',
'LD analog right', 'LD additional information',
'LD number of chapter stops', 'LD dialogue intellegibility',
'LD disc size', 'LD master format', 'LD subtitles',
'LD status of availablility', 'LD quality of source',
'LD number of sides', 'LD video standard', 'LD supplement',
'LD original title', 'LD sound encoding', 'LD number', 'LD label',
'LD catalog number', 'LD laserdisc title', 'screenplay-teleplay',
'novel', 'adaption', 'book', 'production process protocol',
'printed media reviews', 'essays', 'other literature', 'mpaa',
'plot', 'votes distribution', 'votes', 'rating',
'production dates', 'copyright holder', 'filming dates', 'budget',
'weekend gross', 'gross', 'opening weekend', 'rentals',
'admissions', 'studios', 'top 250 rank', 'bottom 10 rank')}
compCastTypeDefs = {'kind': ('cast', 'crew', 'complete', 'complete+verified')}
linkTypeDefs = {'link': ('follows', 'followed by', 'remake of', 'remade as',
'references', 'referenced in', 'spoofs', 'spoofed in',
'features', 'featured in', 'spin off from', 'spin off',
'version of', 'similar to', 'edited into',
'edited from', 'alternate language version of',
'unknown link')}
roleTypeDefs = {'role': ('actor', 'actress', 'producer', 'writer',
'cinematographer', 'composer', 'costume designer',
'director', 'editor', 'miscellaneous crew',
'production designer', 'guest')}
# Schema of tables in our database.
# XXX: Foreign keys can be used to create constrains between tables,
# but they create indexes in the database, and this
# means poor performances at insert-time.
DB_SCHEMA = [
DBTable('Name',
# namePcodeCf is the soundex of the name in the canonical format.
# namePcodeNf is the soundex of the name in the normal format, if
# different from namePcodeCf.
# surnamePcode is the soundex of the surname, if different from the
# other two values.
# The 'id' column is simply skipped by SQLObject (it's a default);
# the alternateID attribute here will be ignored by SQLAlchemy.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
index='idx_pcodecf'),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CharName',
# namePcodeNf is the soundex of the name in the normal format.
# surnamePcode is the soundex of the surname, if different
# from namePcodeNf.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CompanyName',
# namePcodeNf is the soundex of the name in the normal format.
# namePcodeSf is the soundex of the name plus the country code.
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
DBCol('countryCode', UNICODECOL, length=255, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('namePcodeSf', STRINGCOL, length=5, default=None,
index='idx_pcodesf'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('KindType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=15, default=None, alternateID=True),
values=kindTypeDefs
),
DBTable('Title',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('title', UNICODECOL, notNone=True,
index='idx_title', indexLen=10),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
DBCol('productionYear', INTCOL, default=None),
DBCol('imdbID', INTCOL, default=None),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
foreignKey='Title'),
DBCol('seasonNr', INTCOL, default=None),
DBCol('episodeNr', INTCOL, default=None),
# Maximum observed length is 44; 49 can store 5 comma-separated
# year-year pairs.
DBCol('seriesYears', STRINGCOL, length=49, default=None),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('CompanyType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=32, default=None, alternateID=True),
values=companyTypeDefs
),
DBTable('AkaName',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_person',
foreignKey='Name'),
DBCol('name', UNICODECOL, notNone=True),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
index='idx_pcodecf'),
DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
index='idx_pcodenf'),
DBCol('surnamePcode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('AkaTitle',
# XXX: It's safer to set notNone to False, here.
# alias for akas are stored completely in the AkaTitle table;
# this means that episodes will set also a "tv series" alias name.
# Reading the aka-title.list file it looks like there are
# episode titles with aliases to different titles for both
# the episode and the series title, while for just the series
# there are no aliases.
# E.g.:
# aka title original title
# "Series, The" (2005) {The Episode} "Other Title" (2005) {Other Title}
# But there is no:
# "Series, The" (2005) "Other Title" (2005)
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_movieid',
foreignKey='Title'),
DBCol('title', UNICODECOL, notNone=True),
DBCol('imdbIndex', UNICODECOL, length=12, default=None),
DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
DBCol('productionYear', INTCOL, default=None),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode'),
DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
foreignKey='AkaTitle'),
DBCol('seasonNr', INTCOL, default=None),
DBCol('episodeNr', INTCOL, default=None),
DBCol('note', UNICODECOL, default=None),
DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
),
DBTable('RoleType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('role', STRINGCOL, length=32, notNone=True, alternateID=True),
values=roleTypeDefs
),
DBTable('CastInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_pid',
foreignKey='Name'),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('personRoleID', INTCOL, default=None, index='idx_cid',
foreignKey='CharName'),
DBCol('note', UNICODECOL, default=None),
DBCol('nrOrder', INTCOL, default=None),
DBCol('roleID', INTCOL, notNone=True, foreignKey='RoleType')
),
DBTable('CompCastType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('kind', STRINGCOL, length=32, notNone=True, alternateID=True),
values=compCastTypeDefs
),
DBTable('CompleteCast',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, index='idx_mid', foreignKey='Title'),
DBCol('subjectID', INTCOL, notNone=True, foreignKey='CompCastType'),
DBCol('statusID', INTCOL, notNone=True, foreignKey='CompCastType')
),
DBTable('InfoType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('info', STRINGCOL, length=32, notNone=True, alternateID=True),
values=infoTypeDefs
),
DBTable('LinkType',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('link', STRINGCOL, length=32, notNone=True, alternateID=True),
values=linkTypeDefs
),
DBTable('Keyword',
DBCol('id', INTCOL, notNone=True, alternateID=True),
# XXX: can't use alternateID=True, because it would create
# a UNIQUE index; unfortunately (at least with a common
# collation like utf8_unicode_ci) MySQL will consider
# some different keywords identical - like
# "fiancée" and "fiancee".
DBCol('keyword', UNICODECOL, length=255, notNone=True,
index='idx_keyword', indexLen=5),
DBCol('phoneticCode', STRINGCOL, length=5, default=None,
index='idx_pcode')
),
DBTable('MovieKeyword',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('keywordID', INTCOL, notNone=True, index='idx_keywordid',
foreignKey='Keyword')
),
DBTable('MovieLink',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('linkedMovieID', INTCOL, notNone=True, foreignKey='Title'),
DBCol('linkTypeID', INTCOL, notNone=True, foreignKey='LinkType')
),
DBTable('MovieInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True),
DBCol('note', UNICODECOL, default=None)
),
# This table is identical to MovieInfo, except that both 'infoTypeID'
# and 'info' are indexed.
DBTable('MovieInfoIdx',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('infoTypeID', INTCOL, notNone=True, index='idx_infotypeid',
foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True, index='idx_info', indexLen=10),
DBCol('note', UNICODECOL, default=None)
),
DBTable('MovieCompanies',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
foreignKey='Title'),
DBCol('companyID', INTCOL, notNone=True, index='idx_cid',
foreignKey='CompanyName'),
DBCol('companyTypeID', INTCOL, notNone=True, foreignKey='CompanyType'),
DBCol('note', UNICODECOL, default=None)
),
DBTable('PersonInfo',
DBCol('id', INTCOL, notNone=True, alternateID=True),
DBCol('personID', INTCOL, notNone=True, index='idx_pid',
foreignKey='Name'),
DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
DBCol('info', UNICODECOL, notNone=True),
DBCol('note', UNICODECOL, default=None)
)
]
# Functions to manage tables.
def dropTables(tables, ifExists=True):
    """Drop the tables."""
    # Drop in reverse definition order, so that a table referenced by a
    # foreign key is removed only after the tables pointing at it.
    for tbl in reversed(list(tables)):
        _dbschema_logger.info('dropping table %s', tbl._imdbpyName)
        tbl.dropTable(ifExists)
def createTables(tables, ifNotExists=True):
    """Create the tables and insert default values."""
    for tbl in tables:
        # Create the table itself.
        _dbschema_logger.info('creating table %s', tbl._imdbpyName)
        tbl.createTable(ifNotExists)
        # Populate the table with its default rows, when the schema
        # defines any (e.g. the KindType/RoleType lookup tables).
        defaults = tbl._imdbpySchema.values
        if not defaults:
            continue
        _dbschema_logger.info('inserting values into table %s',
                              tbl._imdbpyName)
        for column in defaults:
            for value in defaults[column]:
                tbl(**{column: unicode(value)})
def createIndexes(tables, ifNotExists=True):
    """Create the indexes in the database."""
    for tbl in tables:
        _dbschema_logger.info('creating indexes for table %s',
                              tbl._imdbpyName)
        tbl.addIndexes(ifNotExists)
def createForeignKeys(tables, ifNotExists=True):
    """Create Foreign Keys."""
    # Map every table name to its table object, so that each table can
    # resolve the targets of its foreign keys.
    byName = dict((tbl._imdbpyName, tbl) for tbl in tables)
    for tbl in tables:
        _dbschema_logger.info('creating foreign keys for table %s',
                              tbl._imdbpyName)
        tbl.addForeignKeys(byName, ifNotExists)
| [
[
8,
0,
0.0293,
0.0521,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0586,
0.0022,
0,
0.66,
0.0526,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0629,
0.0022,
0,
0.6... | [
"\"\"\"\nparser.sql.dbschema module (imdb.parser.sql package).\n\nThis module provides the schema used to describe the layout of the\ndatabase used by the imdb.parser.sql package; functions to create/drop\ntables and indexes are also provided.\n\nCopyright 2005-2010 Davide Alberani <da@erlug.linux.it>",
"import l... |
"""
parser.http.movieParser module (imdb package).
This module provides the classes (and the instances), used to parse the
IMDb pages on the akas.imdb.com server about a movie.
E.g., for Brian De Palma's "The Untouchables", the referred
pages would be:
combined details: http://akas.imdb.com/title/tt0094226/combined
plot summary: http://akas.imdb.com/title/tt0094226/plotsummary
...and so on...
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import urllib
from imdb import imdbURL_base
from imdb.Person import Person
from imdb.Movie import Movie
from imdb.Company import Company
from imdb.utils import analyze_title, split_company_name_notes, _Container
from utils import build_person, DOMParserBase, Attribute, Extractor, \
analyze_imdbid
# Dictionary used to convert some section's names.
# FIX: the original literal listed the 'production managers' key twice
# (with the same value); the redundant entry was removed.
_SECT_CONV = {
        'directed': 'director',
        'directed by': 'director',
        'directors': 'director',
        'editors': 'editor',
        'writing credits': 'writer',
        'writers': 'writer',
        'produced': 'producer',
        'cinematography': 'cinematographer',
        'film editing': 'editor',
        'casting': 'casting director',
        'costume design': 'costume designer',
        'makeup department': 'make up',
        'production management': 'production manager',
        'second unit director or assistant director': 'assistant director',
        'costume and wardrobe department': 'costume department',
        'sound department': 'sound crew',
        'stunts': 'stunt performer',
        'other crew': 'miscellaneous crew',
        'also known as': 'akas',
        'country': 'countries',
        'runtime': 'runtimes',
        'language': 'languages',
        'certification': 'certificates',
        'genre': 'genres',
        'created': 'creator',
        'creators': 'creator',
        'color': 'color info',
        'plot': 'plot outline',
        'seasons': 'number of seasons',
        'art directors': 'art direction',
        'assistant directors': 'assistant director',
        'set decorators': 'set decoration',
        'visual effects department': 'visual effects',
        'production managers': 'production manager',
        'miscellaneous': 'miscellaneous crew',
        'make up department': 'make up',
        'plot summary': 'plot outline',
        'cinematographers': 'cinematographer',
        'camera department': 'camera and electrical department',
        'costume designers': 'costume designer',
        'production designers': 'production design',
        'music original': 'original music',
        'casting directors': 'casting director',
        'other companies': 'miscellaneous companies',
        'producers': 'producer',
        'special effects by': 'special effects department',
        'special effects': 'special effects companies'
}
def _manageRoles(mo):
    """Perform some transformation on the html, so that roleIDs can
    be easily retrieved."""
    head = mo.group(1)
    body = mo.group(2)
    tail = mo.group(3)
    rebuilt = []
    # Multiple roles are separated by ' / ' in the original html.
    for chunk in body.split(' / '):
        chunk = chunk.strip()
        if not chunk:
            continue
        rid = analyze_imdbid(chunk)
        if rid is None:
            rid = u'/'
        else:
            rid += u'/'
        rebuilt.append(u'<div class="_imdbpyrole" roleid="%s">%s</div>' % \
                        (rid, chunk))
    return head + u' / '.join(rebuilt) + tail
# Matches the content of a "char" cell in a cast table; used together
# with _manageRoles() to rewrite it so roleIDs are easy to extract.
_reRolesMovie = re.compile(r'(<td class="char">)(.*?)(</td>)',
                    re.I | re.M | re.S)
def _replaceBR(mo):
    """Replaces <br> tags with '::' (useful for some akas)"""
    return mo.group(0).replace('<br>', '::')
# Matches the whole "also known as" section; its <br> separators are
# turned into '::' by _replaceBR().
_reAkas = re.compile(r'<h5>also known as:</h5>.*?</div>', re.I | re.M | re.S)
def makeSplitter(lstrip=None, sep='|', comments=True,
                origNotesSep=' (', newNotesSep='::(', strip=None):
    """Return a splitter function suitable for a given set of data."""
    def splitter(x):
        # Falsy values (None, empty string) are passed through untouched.
        if not x:
            return x
        x = x.strip()
        if not x:
            return x
        if lstrip is not None:
            x = x.lstrip(lstrip).lstrip()
        pieces = [piece.strip() for piece in x.split(sep)]
        pieces = [piece for piece in pieces if piece]
        if comments:
            # Rewrite the notes separator, first occurrence only.
            pieces = [piece.replace(origNotesSep, newNotesSep, 1)
                        for piece in pieces]
        if strip:
            pieces = [piece.strip(strip) for piece in pieces]
        return pieces
    return splitter
def _toInt(val, replace=()):
    """Return the value, converted to integer, or None; if present, 'replace'
    must be a list of tuples of values to replace."""
    for old, new in replace:
        val = val.replace(old, new)
    try:
        result = int(val)
    except (TypeError, ValueError):
        result = None
    return result
class DOMHTMLMovieParser(DOMParserBase):
    """Parser for the "combined details" (and if instance.mdparse is
    True also for the "main details") page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        mparser = DOMHTMLMovieParser()
        result = mparser.parse(combined_details_html_string)
    """
    _containsObjects = True
    extractors = [Extractor(label='title',
                            path="//h1",
                            attrs=Attribute(key='title',
                                        path=".//text()",
                                        postprocess=analyze_title)),
                Extractor(label='glossarysections',
                        group="//a[@class='glossary']",
                        group_key="./@name",
                        group_key_normalize=lambda x: x.replace('_', ' '),
                        path="../../../..//tr",
                        attrs=Attribute(key=None,
                            multi=True,
                            path={'person': ".//text()",
                                'link': "./td[1]/a[@href]/@href"},
                            postprocess=lambda x: \
                                    build_person(x.get('person') or u'',
                                    personID=analyze_imdbid(x.get('link')))
                            )),
                Extractor(label='cast',
                        path="//table[@class='cast']//tr",
                        attrs=Attribute(key="cast",
                            multi=True,
                            path={'person': ".//text()",
                                'link': "td[2]/a/@href",
                                'roleID': \
                                    "td[4]/div[@class='_imdbpyrole']/@roleid"},
                            postprocess=lambda x: \
                                    build_person(x.get('person') or u'',
                                    personID=analyze_imdbid(x.get('link')),
                                    roleID=(x.get('roleID') or u'').split('/'))
                            )),
                Extractor(label='genres',
                        path="//div[@class='info']//a[starts-with(@href," \
                                " '/Sections/Genres')]",
                        attrs=Attribute(key="genres",
                            multi=True,
                            path="./text()")),
                Extractor(label='h5sections',
                        path="//div[@class='info']/h5/..",
                        attrs=[
                            Attribute(key="plot summary",
                                path="./h5[starts-with(text(), " \
                                        "'Plot:')]/../div/text()",
                                postprocess=lambda x: \
                                        x.strip().rstrip('|').rstrip()),
                            Attribute(key="aspect ratio",
                                path="./h5[starts-with(text()," \
                                        " 'Aspect')]/../div/text()",
                                postprocess=lambda x: x.strip()),
                            Attribute(key="mpaa",
                                path="./h5/a[starts-with(text()," \
                                        " 'MPAA')]/../../div/text()",
                                postprocess=lambda x: x.strip()),
                            Attribute(key="countries",
                                path="./h5[starts-with(text(), " \
                            "'Countr')]/../div[@class='info-content']//text()",
                                postprocess=makeSplitter('|')),
                            Attribute(key="language",
                                path="./h5[starts-with(text(), " \
                                        "'Language')]/..//text()",
                                postprocess=makeSplitter('Language:')),
                            Attribute(key='color info',
                                path="./h5[starts-with(text(), " \
                                        "'Color')]/..//text()",
                                postprocess=makeSplitter('Color:')),
                            Attribute(key='sound mix',
                                path="./h5[starts-with(text(), " \
                                        "'Sound Mix')]/..//text()",
                                postprocess=makeSplitter('Sound Mix:')),
                            # Collects akas not encosed in <i> tags.
                            Attribute(key='other akas',
                                path="./h5[starts-with(text(), " \
                                        "'Also Known As')]/../div//text()",
                                postprocess=makeSplitter(sep='::',
                                                origNotesSep='" - ',
                                                newNotesSep='::',
                                                strip='"')),
                            Attribute(key='runtimes',
                                path="./h5[starts-with(text(), " \
                                        "'Runtime')]/../div/text()",
                                postprocess=makeSplitter()),
                            Attribute(key='certificates',
                                path="./h5[starts-with(text(), " \
                                        "'Certificat')]/..//text()",
                                postprocess=makeSplitter('Certification:')),
                            Attribute(key='number of seasons',
                                path="./h5[starts-with(text(), " \
                                        "'Seasons')]/..//text()",
                                postprocess=lambda x: x.count('|') + 1),
                            Attribute(key='original air date',
                                path="./h5[starts-with(text(), " \
                                        "'Original Air Date')]/../div/text()"),
                            Attribute(key='tv series link',
                                path="./h5[starts-with(text(), " \
                                        "'TV Series')]/..//a/@href"),
                            Attribute(key='tv series title',
                                path="./h5[starts-with(text(), " \
                                        "'TV Series')]/..//a/text()")
                            ]),
                Extractor(label='creator',
                        path="//h5[starts-with(text(), 'Creator')]/..//a",
                        attrs=Attribute(key='creator', multi=True,
                                        path={'name': "./text()",
                                            'link': "./@href"},
                                        postprocess=lambda x: \
                                            build_person(x.get('name') or u'',
                                personID=analyze_imdbid(x.get('link')))
                                        )),
                Extractor(label='thin writer',
                        path="//h5[starts-with(text(), 'Writer')]/..//a",
                        attrs=Attribute(key='thin writer', multi=True,
                                        path={'name': "./text()",
                                            'link': "./@href"},
                                        postprocess=lambda x: \
                                            build_person(x.get('name') or u'',
                                personID=analyze_imdbid(x.get('link')))
                                        )),
                Extractor(label='thin director',
                        path="//h5[starts-with(text(), 'Director')]/..//a",
                        attrs=Attribute(key='thin director', multi=True,
                                        path={'name': "./text()",
                                            'link': "@href"},
                                        postprocess=lambda x: \
                                            build_person(x.get('name') or u'',
                                personID=analyze_imdbid(x.get('link')))
                                        )),
                Extractor(label='top 250/bottom 100',
                        path="//div[@class='starbar-special']/" \
                                "a[starts-with(@href, '/chart/')]",
                        attrs=Attribute(key='top/bottom rank',
                                        path="./text()")),
                Extractor(label='series years',
                        path="//div[@id='tn15title']//span" \
                                "[starts-with(text(), 'TV series')]",
                        attrs=Attribute(key='series years',
                                        path="./text()",
                                        postprocess=lambda x: \
                                                x.replace('TV series','').strip())),
                Extractor(label='number of episodes',
                        path="//a[@title='Full Episode List']",
                        attrs=Attribute(key='number of episodes',
                                        path="./text()",
                                        postprocess=lambda x: \
                                                _toInt(x, [(' Episodes', '')]))),
                Extractor(label='akas',
                        path="//i[@class='transl']",
                        attrs=Attribute(key='akas', multi=True, path='text()',
                                postprocess=lambda x:
                                x.replace('  ', ' ').rstrip('-').replace('" - ',
                                    '"::', 1).strip('"').replace('  ', ' '))),
                Extractor(label='production notes/status',
                        path="//div[@class='info inprod']",
                        attrs=Attribute(key='production notes',
                                path=".//text()",
                                postprocess=lambda x: x.strip())),
                Extractor(label='blackcatheader',
                        group="//b[@class='blackcatheader']",
                        group_key="./text()",
                        group_key_normalize=lambda x: x.lower(),
                        path="../ul/li",
                        attrs=Attribute(key=None,
                                multi=True,
                                path={'name': "./a//text()",
                                        'comp-link': "./a/@href",
                                        'notes': "./text()"},
                                postprocess=lambda x: \
                                        Company(name=x.get('name') or u'',
                                companyID=analyze_imdbid(x.get('comp-link')),
                                notes=(x.get('notes') or u'').strip())
                            )),
                Extractor(label='rating',
                        path="//div[@class='starbar-meta']/b",
                        attrs=Attribute(key='rating',
                                        path=".//text()")),
                Extractor(label='votes',
                        path="//div[@class='starbar-meta']/a[@href]",
                        attrs=Attribute(key='votes',
                                        path=".//text()")),
                Extractor(label='cover url',
                        path="//a[@name='poster']",
                        attrs=Attribute(key='cover url',
                                        path="./img/@src"))
                ]
    # Html rewrites applied before the DOM is built.
    preprocessors = [
        (re.compile(r'(<b class="blackcatheader">.+?</b>)', re.I),
            r'</div><div>\1'),
        ('<small>Full cast and crew for<br></small>', ''),
        ('<td> </td>', '<td>...</td>'),
        ('<span class="tv-extra">TV mini-series</span>',
            '<span class="tv-extra">(mini)</span>'),
        (_reRolesMovie, _manageRoles),
        (_reAkas, _replaceBR)]
    def preprocess_dom(self, dom):
        # Handle series information.
        xpath = self.xpath(dom, "//b[text()='Series Crew']")
        if xpath:
            b = xpath[-1] # In doubt, take the last one.
            for a in self.xpath(b, "./following::h5/a[@class='glossary']"):
                name = a.get('name')
                if name:
                    a.set('name', 'series %s' % name)
        # Remove links to IMDbPro.
        for proLink in self.xpath(dom, "//span[@class='pro-link']"):
            proLink.drop_tree()
        # Remove some 'more' links (keep others, like the one around
        # the number of votes).
        for tn15more in self.xpath(dom,
                    "//a[@class='tn15more'][starts-with(@href, '/title/')]"):
            tn15more.drop_tree()
        return dom
    re_space = re.compile(r'\s+')
    re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
    def postprocess_data(self, data):
        # Convert section names.
        for sect in data.keys():
            if sect in _SECT_CONV:
                data[_SECT_CONV[sect]] = data[sect]
                del data[sect]
                sect = _SECT_CONV[sect]
        # Filter out fake values.
        for key in data:
            value = data[key]
            if isinstance(value, list) and value:
                if isinstance(value[0], Person):
                    data[key] = filter(lambda x: x.personID is not None, value)
                if isinstance(value[0], _Container):
                    for obj in data[key]:
                        obj.accessSystem = self._as
                        obj.modFunct = self._modFunct
        if 'akas' in data or 'other akas' in data:
            akas = data.get('akas') or []
            other_akas = data.get('other akas') or []
            akas += other_akas
            if 'akas' in data:
                del data['akas']
            if 'other akas' in data:
                del data['other akas']
            if akas:
                data['akas'] = akas
        if 'runtimes' in data:
            data['runtimes'] = [x.replace(' min', u'')
                                for x in data['runtimes']]
        if 'production notes' in data:
            pn = data['production notes'].replace('\n\nComments:',
                                '\nComments:').replace('\n\nNote:',
                                '\nNote:').replace('Note:\n\n',
                                'Note:\n').split('\n')
            for k, v in zip(pn[::2], pn[1::2]):
                v = v.strip()
                if not v:
                    continue
                k = k.lower().strip(':')
                if k == 'note':
                    k = 'status note'
                data[k] = v
            del data['production notes']
        if 'original air date' in data:
            oid = self.re_space.sub(' ', data['original air date']).strip()
            data['original air date'] = oid
            aid = self.re_airdate.findall(oid)
            if aid and len(aid[0]) == 3:
                date, season, episode = aid[0]
                date = date.strip()
                # FIX: narrowed from bare except; int() on a regex-matched
                # string can only raise ValueError/TypeError.
                try: season = int(season)
                except (ValueError, TypeError): pass
                try: episode = int(episode)
                except (ValueError, TypeError): pass
                if date and date != '????':
                    data['original air date'] = date
                else:
                    del data['original air date']
                # Handle also "episode 0".
                if season or type(season) is type(0):
                    data['season'] = season
                # BUG FIX: the original tested type(season) here (copy-paste
                # error), so an "episode 0" entry was silently dropped.
                if episode or type(episode) is type(0):
                    data['episode'] = episode
        for k in ('writer', 'director'):
            t_k = 'thin %s' % k
            if t_k not in data:
                continue
            if k not in data:
                data[k] = data[t_k]
            del data[t_k]
        if 'top/bottom rank' in data:
            tbVal = data['top/bottom rank'].lower()
            if tbVal.startswith('top'):
                tbKey = 'top 250 rank'
                tbVal = _toInt(tbVal, [('top 250: #', '')])
            else:
                tbKey = 'bottom 100 rank'
                tbVal = _toInt(tbVal, [('bottom 100: #', '')])
            if tbVal:
                data[tbKey] = tbVal
            del data['top/bottom rank']
        if 'year' in data and data['year'] == '????':
            del data['year']
        if 'tv series link' in data:
            if 'tv series title' in data:
                data['episode of'] = Movie(title=data['tv series title'],
                                            movieID=analyze_imdbid(
                                                    data['tv series link']),
                                            accessSystem=self._as,
                                            modFunct=self._modFunct)
                del data['tv series title']
            del data['tv series link']
        if 'rating' in data:
            try:
                data['rating'] = float(data['rating'].replace('/10', ''))
            except (TypeError, ValueError):
                pass
        if 'votes' in data:
            try:
                votes = data['votes'].replace(',', '').replace('votes', '')
                data['votes'] = int(votes)
            except (TypeError, ValueError):
                pass
        return data
def _process_plotsummary(x):
    """Process a plot (contributed by Rdian06)."""
    author = x.get('author')
    if author:
        # Map the {...} and (...) markers found in the page to <...>.
        author = author.replace('{', '<').replace('}', '>')
        author = author.replace('(', '<').replace(')', '>').strip()
    plot = x.get('plot', u'').strip()
    if author:
        plot += u'::%s' % author
    return plot
class DOMHTMLPlotParser(DOMParserBase):
    """Parser for the "plot summary" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a 'plot' key, containing a list
    of string with the structure: 'summary::summary_author <author@email>'.
    Example:
        pparser = HTMLPlotParser()
        result = pparser.parse(plot_summary_html_string)
    """
    # NOTE(review): presumably tells DOMParserBase to collect references
    # (titles/names) found in the text -- confirm in the base class.
    _defGetRefs = True
    # Notice that recently IMDb started to put the email of the
    # author only in the link, that we're not collecting, here.
    extractors = [Extractor(label='plot',
                            path="//p[@class='plotpar']",
                            attrs=Attribute(key='plot',
                                            multi=True,
                                            path={'plot': './text()',
                                                  'author': './i/a/text()'},
                                            postprocess=_process_plotsummary))]
def _process_award(x):
    """Return a dictionary with the details of a single award entry;
    an empty dictionary is returned when no award name is present."""
    name = x.get('award').strip()
    if not name:
        return {}
    award = {'award': name}
    year = x.get('year').strip()
    # Keep the year as a string unless it is purely numeric.
    if year and year.isdigit():
        year = int(year)
    award['year'] = year
    award['result'] = x.get('result').strip()
    category = x.get('category').strip()
    if category:
        award['category'] = category
    shared_with = x.get('with')
    if shared_with is not None:
        award['with'] = shared_with.strip()
    notes = x.get('notes')
    if notes is not None:
        notes = notes.strip()
        if notes:
            award['notes'] = notes
    award['anchor'] = x.get('anchor')
    return award
class DOMHTMLAwardsParser(DOMParserBase):
    """Parser for the "awards" page of a given person or movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        awparser = HTMLAwardsParser()
        result = awparser.parse(awards_html_string)
    """
    # 'title' when parsing a movie's awards, 'name' for a person's;
    # postprocess_data() builds Person or Movie objects accordingly.
    subject = 'title'
    _containsObjects = True
    extractors = [
        Extractor(label='awards',
            group="//table//big",
            group_key="./a",
            path="./ancestor::tr[1]/following-sibling::tr/" \
                    "td[last()][not(@colspan)]",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'year': "../td[1]/a/text()",
                    'result': "../td[2]/b/text()",
                    'award': "../td[3]/text()",
                    'category': "./text()[1]",
                    # FIXME: takes only the first co-recipient
                    'with': "./small[starts-with(text()," \
                            " 'Shared with:')]/following-sibling::a[1]/text()",
                    'notes': "./small[last()]//text()",
                    'anchor': ".//text()"
                    },
                postprocess=_process_award
                )),
        Extractor(label='recipients',
            group="//table//big",
            group_key="./a",
            path="./ancestor::tr[1]/following-sibling::tr/" \
                    "td[last()]/small[1]/preceding-sibling::a",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'name': "./text()",
                    'link': "./@href",
                    'anchor': "..//text()"
                    }
                ))
    ]
    preprocessors = [
        (re.compile('(<tr><td[^>]*>.*?</td></tr>\n\n</table>)', re.I),
            r'\1</table>'),
        (re.compile('(<tr><td[^>]*>\n\n<big>.*?</big></td></tr>)', re.I),
            r'</table><table class="_imdbpy">\1'),
        (re.compile('(<table[^>]*>\n\n)</table>(<table)', re.I), r'\1\2'),
        (re.compile('(<small>.*?)<br>(.*?</small)', re.I), r'\1 \2'),
        (re.compile('(</tr>\n\n)(<td)', re.I), r'\1<tr>\2')
        ]
    def preprocess_dom(self, dom):
        """Repeat td elements according to their rowspan attributes
        in subsequent tr elements.
        """
        cols = self.xpath(dom, "//td[@rowspan]")
        for col in cols:
            span = int(col.get('rowspan'))
            del col.attrib['rowspan']
            position = len(self.xpath(col, "./preceding-sibling::td"))
            row = col.getparent()
            for tr in self.xpath(row, "./following-sibling::tr")[:span-1]:
                # if not cloned, child will be moved to new parent
                clone = self.clone(col)
                # XXX: beware that here we don't use an "adapted" function,
                #      because both BeautifulSoup and lxml uses the same
                #      "insert" method.
                tr.insert(position, clone)
        return dom
    def postprocess_data(self, data):
        if len(data) == 0:
            return {}
        nd = []
        for key in data.keys():
            # The grouping key is an html fragment: extract the name of
            # the awarding body from its first anchor text.
            dom = self.get_dom(key)
            assigner = self.xpath(dom, "//a/text()")[0]
            for entry in data[key]:
                if not entry.has_key('name'):
                    if not entry:
                        continue
                    # this is an award, not a recipient
                    entry['assigner'] = assigner.strip()
                    # find the recipients
                    matches = [p for p in data[key]
                               if p.has_key('name') and (entry['anchor'] ==
                                   p['anchor'])]
                    if self.subject == 'title':
                        recipients = [Person(name=recipient['name'],
                                    personID=analyze_imdbid(recipient['link']))
                                    for recipient in matches]
                        entry['to'] = recipients
                    elif self.subject == 'name':
                        recipients = [Movie(title=recipient['name'],
                                    movieID=analyze_imdbid(recipient['link']))
                                    for recipient in matches]
                        entry['for'] = recipients
                    nd.append(entry)
                del entry['anchor']
        return {'awards': nd}
class DOMHTMLTaglinesParser(DOMParserBase):
    """Parser for the "taglines" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        tparser = DOMHTMLTaglinesParser()
        result = tparser.parse(taglines_html_string)
    """
    # One tagline is collected from every <p> under the tn15content div.
    extractors = [Extractor(label='taglines',
                            path="//div[@id='tn15content']/p",
                            attrs=Attribute(key='taglines', multi=True,
                                            path="./text()"))]
class DOMHTMLKeywordsParser(DOMParserBase):
    """Parser for the "keywords" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        kwparser = DOMHTMLKeywordsParser()
        result = kwparser.parse(keywords_html_string)
    """
    # Keywords are normalized to lowercase, with hyphens for spaces.
    extractors = [Extractor(label='keywords',
                            path="//a[starts-with(@href, '/keyword/')]",
                            attrs=Attribute(key='keywords',
                                            path="./text()", multi=True,
                                            postprocess=lambda x: \
                                                x.lower().replace(' ', '-')))]
class DOMHTMLAlternateVersionsParser(DOMParserBase):
    """Parser for the "alternate versions" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        avparser = DOMHTMLAlternateVersionsParser()
        result = avparser.parse(alternateversions_html_string)
    """
    _defGetRefs = True
    extractors = [Extractor(label='alternate versions',
                            path="//ul[@class='trivia']/li",
                            attrs=Attribute(key='alternate versions',
                                            multi=True,
                                            path=".//text()",
                                            postprocess=lambda x: x.strip()))]
class DOMHTMLTriviaParser(DOMParserBase):
    """Parser for the "trivia" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        tparser = DOMHTMLTriviaParser()
        result = tparser.parse(trivia_html_string)
    """
    _defGetRefs = True
    # NOTE(review): the label 'alternate versions' looks like a leftover
    # from copy/paste; the collected key is correctly 'trivia'.
    extractors = [Extractor(label='alternate versions',
                            path="//div[@class='sodatext']",
                            attrs=Attribute(key='trivia',
                                            multi=True,
                                            path=".//text()",
                                            postprocess=lambda x: x.strip()))]
    def preprocess_dom(self, dom):
        # Remove "link this quote" links.
        for qLink in self.xpath(dom, "//span[@class='linksoda']"):
            qLink.drop_tree()
        return dom
class DOMHTMLSoundtrackParser(DOMHTMLAlternateVersionsParser):
    """Parser for the "soundtrack" page of a given movie; reuses the
    extraction of DOMHTMLAlternateVersionsParser and re-shapes the data."""
    kind = 'soundtrack'
    # Turn <br> separators into newlines, so every song entry can be
    # split line by line below.
    preprocessors = [
        ('<br>', '\n')
        ]
    def postprocess_data(self, data):
        if 'soundtrack' in data:
            nd = []
            for x in data['soundtrack']:
                ds = x.split('\n')
                # First line is the song title, possibly quoted.
                title = ds[0]
                if title[0] == '"' and title[-1] == '"':
                    title = title[1:-1]
                nds = []
                newData = {}
                # Group the remaining lines: a line that doesn't contain
                # one of the known separators is a continuation of the
                # previous line.
                for l in ds[1:]:
                    if ' with ' in l or ' by ' in l or ' from ' in l \
                            or ' of ' in l or l.startswith('From '):
                        nds.append(l)
                    else:
                        if nds:
                            nds[-1] += l
                        else:
                            nds.append(l)
                newData[title] = {}
                # Split every grouped line into a kind ('with', 'by',
                # 'from', 'of', 'from') and its information.
                for l in nds:
                    skip = False
                    for sep in ('From ',):
                        if l.startswith(sep):
                            fdix = len(sep)
                            kind = l[:fdix].rstrip().lower()
                            info = l[fdix:].lstrip()
                            newData[title][kind] = info
                            skip = True
                    if not skip:
                        for sep in ' with ', ' by ', ' from ', ' of ':
                            fdix = l.find(sep)
                            if fdix != -1:
                                fdix = fdix+len(sep)
                                kind = l[:fdix].rstrip().lower()
                                info = l[fdix:].lstrip()
                                newData[title][kind] = info
                                break
                nd.append(newData)
            data['soundtrack'] = nd
        return data
class DOMHTMLCrazyCreditsParser(DOMParserBase):
    """Parser for the "crazy credits" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        ccparser = DOMHTMLCrazyCreditsParser()
        result = ccparser.parse(crazycredits_html_string)
    """
    _defGetRefs = True
    # Newlines and double spaces are squeezed into single spaces.
    extractors = [Extractor(label='crazy credits', path="//ul/li/tt",
                            attrs=Attribute(key='crazy credits', multi=True,
                                path=".//text()",
                                postprocess=lambda x: \
                                    x.replace('\n', ' ').replace('  ', ' ')))]
class DOMHTMLGoofsParser(DOMParserBase):
    """Parser for the "goofs" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        gparser = DOMHTMLGoofsParser()
        result = gparser.parse(goofs_html_string)
    """
    _defGetRefs = True
    extractors = [Extractor(label='goofs', path="//ul[@class='trivia']/li",
                    attrs=Attribute(key='goofs', multi=True, path=".//text()",
                        postprocess=lambda x: (x or u'').strip()))]
class DOMHTMLQuotesParser(DOMParserBase):
    """Parser for the "memorable quotes" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        qparser = DOMHTMLQuotesParser()
        result = qparser.parse(quotes_html_string)
    """
    _defGetRefs = True
    extractors = [
        Extractor(label='quotes',
            path="//div[@class='_imdbpy']",
            attrs=Attribute(key='quotes',
                multi=True,
                path=".//text()",
                postprocess=lambda x: x.strip().replace(' \n',
                            '::').replace('::\n', '::').replace('\n', ' ')))
        ]
    # Wrap every quote in a <div class="_imdbpy"> so the extractor above
    # can address them; each quote starts at a <a name="qtNNNNNNN"> anchor.
    preprocessors = [
        (re.compile('(<a name="?qt[0-9]{7}"?></a>)', re.I),
            r'\1<div class="_imdbpy">'),
        (re.compile('<hr width="30%">', re.I), '</div>'),
        (re.compile('<hr/>', re.I), '</div>'),
        (re.compile('<script.*?</script>', re.I|re.S), ''),
        # For BeautifulSoup.
        (re.compile('<!-- sid: t-channel : MIDDLE_CENTER -->', re.I), '</div>')
        ]
    def preprocess_dom(self, dom):
        # Remove "link this quote" links.
        for qLink in self.xpath(dom, "//p[@class='linksoda']"):
            qLink.drop_tree()
        return dom
    def postprocess_data(self, data):
        if 'quotes' not in data:
            return {}
        # A quote is a list of lines, one per speaker.
        for idx, quote in enumerate(data['quotes']):
            data['quotes'][idx] = quote.split('::')
        return data
class DOMHTMLReleaseinfoParser(DOMParserBase):
"""Parser for the "release dates" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rdparser = DOMHTMLReleaseinfoParser()
result = rdparser.parse(releaseinfo_html_string)
"""
extractors = [Extractor(label='release dates',
path="//th[@class='xxxx']/../../tr",
attrs=Attribute(key='release dates', multi=True,
path={'country': ".//td[1]//text()",
'date': ".//td[2]//text()",
'notes': ".//td[3]//text()"})),
Extractor(label='akas',
path="//div[@class='_imdbpy_akas']/table/tr",
attrs=Attribute(key='akas', multi=True,
path={'title': "./td[1]/text()",
'countries': "./td[2]/text()"}))]
preprocessors = [
(re.compile('(<h5><a name="?akas"?.*</table>)', re.I | re.M | re.S),
r'<div class="_imdbpy_akas">\1</div>')]
def postprocess_data(self, data):
if not ('release dates' in data or 'akas' in data): return data
releases = data.get('release dates') or []
rl = []
for i in releases:
country = i.get('country')
date = i.get('date')
if not (country and date): continue
country = country.strip()
date = date.strip()
if not (country and date): continue
notes = i['notes']
info = u'%s::%s' % (country, date)
if notes:
info += notes
rl.append(info)
if releases:
del data['release dates']
if rl:
data['release dates'] = rl
akas = data.get('akas') or []
nakas = []
for aka in akas:
title = aka.get('title', '').strip()
if not title:
continue
countries = aka.get('countries', '').split('/')
if not countries:
nakas.append(title)
else:
for country in countries:
nakas.append('%s::%s' % (title, country.strip()))
if akas:
del data['akas']
if nakas:
data['akas from release info'] = nakas
return data
class DOMHTMLRatingsParser(DOMParserBase):
"""Parser for the "user ratings" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rparser = DOMHTMLRatingsParser()
result = rparser.parse(userratings_html_string)
"""
re_means = re.compile('mean\s*=\s*([0-9]\.[0-9])\.\s*median\s*=\s*([0-9])',
re.I)
extractors = [
Extractor(label='number of votes',
path="//td[b='Percentage']/../../tr",
attrs=[Attribute(key='votes',
multi=True,
path={
'votes': "td[1]//text()",
'ordinal': "td[3]//text()"
})]),
Extractor(label='mean and median',
path="//p[starts-with(text(), 'Arithmetic mean')]",
attrs=Attribute(key='mean and median',
path="text()")),
Extractor(label='rating',
path="//a[starts-with(@href, '/search/title?user_rating=')]",
attrs=Attribute(key='rating',
path="text()")),
Extractor(label='demographic voters',
path="//td[b='Average']/../../tr",
attrs=Attribute(key='demographic voters',
multi=True,
path={
'voters': "td[1]//text()",
'votes': "td[2]//text()",
'average': "td[3]//text()"
})),
Extractor(label='top 250',
path="//a[text()='top 250']",
attrs=Attribute(key='top 250',
path="./preceding-sibling::text()[1]"))
]
def postprocess_data(self, data):
nd = {}
votes = data.get('votes', [])
if votes:
nd['number of votes'] = {}
for i in xrange(1, 11):
_ordinal = int(votes[i]['ordinal'])
_strvts = votes[i]['votes'] or '0'
nd['number of votes'][_ordinal] = \
int(_strvts.replace(',', ''))
mean = data.get('mean and median', '')
if mean:
means = self.re_means.findall(mean)
if means and len(means[0]) == 2:
am, med = means[0]
try: am = float(am)
except (ValueError, OverflowError): pass
if type(am) is type(1.0):
nd['arithmetic mean'] = am
try: med = int(med)
except (ValueError, OverflowError): pass
if type(med) is type(0):
nd['median'] = med
if 'rating' in data:
nd['rating'] = float(data['rating'])
dem_voters = data.get('demographic voters')
if dem_voters:
nd['demographic'] = {}
for i in xrange(1, len(dem_voters)):
if (dem_voters[i]['votes'] is not None) \
and (dem_voters[i]['votes'].strip()):
nd['demographic'][dem_voters[i]['voters'].strip().lower()] \
= (int(dem_voters[i]['votes'].replace(',', '')),
float(dem_voters[i]['average']))
if 'imdb users' in nd.get('demographic', {}):
nd['votes'] = nd['demographic']['imdb users'][0]
nd['demographic']['all votes'] = nd['demographic']['imdb users']
del nd['demographic']['imdb users']
top250 = data.get('top 250')
if top250:
sd = top250[9:]
i = sd.find(' ')
if i != -1:
sd = sd[:i]
try: sd = int(sd)
except (ValueError, OverflowError): pass
if type(sd) is type(0):
nd['top 250 rank'] = sd
return nd
class DOMHTMLEpisodesRatings(DOMParserBase):
"""Parser for the "episode ratings ... by date" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
erparser = DOMHTMLEpisodesRatings()
result = erparser.parse(eprating_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='title', path="//title",
attrs=Attribute(key='title', path="./text()")),
Extractor(label='ep ratings',
path="//th/../..//tr",
attrs=Attribute(key='episodes', multi=True,
path={'nr': ".//td[1]/text()",
'ep title': ".//td[2]//text()",
'movieID': ".//td[2]/a/@href",
'rating': ".//td[3]/text()",
'votes': ".//td[4]/text()"}))]
def postprocess_data(self, data):
if 'title' not in data or 'episodes' not in data: return {}
nd = []
title = data['title']
for i in data['episodes']:
ept = i['ep title']
movieID = analyze_imdbid(i['movieID'])
votes = i['votes']
rating = i['rating']
if not (ept and movieID and votes and rating): continue
try:
votes = int(votes.replace(',', '').replace('.', ''))
except:
pass
try:
rating = float(rating)
except:
pass
ept = ept.strip()
ept = u'%s {%s' % (title, ept)
nr = i['nr']
if nr:
ept += u' (#%s)' % nr.strip()
ept += '}'
if movieID is not None:
movieID = str(movieID)
m = Movie(title=ept, movieID=movieID, accessSystem=self._as,
modFunct=self._modFunct)
epofdict = m.get('episode of')
if epofdict is not None:
m['episode of'] = Movie(data=epofdict, accessSystem=self._as,
modFunct=self._modFunct)
nd.append({'episode': m, 'votes': votes, 'rating': rating})
return {'episodes rating': nd}
def _normalize_href(href):
if (href is not None) and (not href.lower().startswith('http://')):
if href.startswith('/'): href = href[1:]
href = '%s%s' % (imdbURL_base, href)
return href
class DOMHTMLOfficialsitesParser(DOMParserBase):
"""Parser for the "official sites", "external reviews", "newsgroup
reviews", "miscellaneous links", "sound clips", "video clips" and
"photographs" pages of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
osparser = DOMHTMLOfficialsitesParser()
result = osparser.parse(officialsites_html_string)
"""
kind = 'official sites'
extractors = [
Extractor(label='site',
path="//ol/li/a",
attrs=Attribute(key='self.kind',
multi=True,
path={
'link': "./@href",
'info': "./text()"
},
postprocess=lambda x: (x.get('info').strip(),
urllib.unquote(_normalize_href(x.get('link'))))))
]
class DOMHTMLConnectionParser(DOMParserBase):
"""Parser for the "connections" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
connparser = DOMHTMLConnectionParser()
result = connparser.parse(connections_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='connection',
group="//div[@class='_imdbpy']",
group_key="./h5/text()",
group_key_normalize=lambda x: x.lower(),
path="./a",
attrs=Attribute(key=None,
path={'title': "./text()",
'movieID': "./@href"},
multi=True))]
preprocessors = [
('<h5>', '</div><div class="_imdbpy"><h5>'),
# To get the movie's year.
('</a> (', ' ('),
('\n<br/>', '</a>'),
('<br/> - ', '::')
]
def postprocess_data(self, data):
for key in data.keys():
nl = []
for v in data[key]:
title = v['title']
ts = title.split('::', 1)
title = ts[0].strip()
notes = u''
if len(ts) == 2:
notes = ts[1].strip()
m = Movie(title=title,
movieID=analyze_imdbid(v['movieID']),
accessSystem=self._as, notes=notes,
modFunct=self._modFunct)
nl.append(m)
data[key] = nl
if not data: return {}
return {'connections': data}
class DOMHTMLLocationsParser(DOMParserBase):
"""Parser for the "locations" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
lparser = DOMHTMLLocationsParser()
result = lparser.parse(locations_html_string)
"""
extractors = [Extractor(label='locations', path="//dt",
attrs=Attribute(key='locations', multi=True,
path={'place': ".//text()",
'note': "./following-sibling::dd[1]" \
"//text()"},
postprocess=lambda x: (u'%s::%s' % (
x['place'].strip(),
(x['note'] or u'').strip())).strip(':')))]
class DOMHTMLTechParser(DOMParserBase):
"""Parser for the "technical", "business", "literature",
"publicity" (for people) and "contacts (for people) pages of
a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = HTMLTechParser()
result = tparser.parse(technical_html_string)
"""
kind = 'tech'
extractors = [Extractor(label='tech',
group="//h5",
group_key="./text()",
group_key_normalize=lambda x: x.lower(),
path="./following-sibling::div[1]",
attrs=Attribute(key=None,
path=".//text()",
postprocess=lambda x: [t.strip()
for t in x.split('\n') if t.strip()]))]
preprocessors = [
(re.compile('(<h5>.*?</h5>)', re.I), r'\1<div class="_imdbpy">'),
(re.compile('((<br/>|</p>|</table>))\n?<br/>(?!<a)', re.I),
r'\1</div>'),
# the ones below are for the publicity parser
(re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
(re.compile('(</td><td valign="top">)', re.I), r'\1::'),
(re.compile('(</tr><tr>)', re.I), r'\n\1'),
# this is for splitting individual entries
(re.compile('<br/>', re.I), r'\n'),
]
def postprocess_data(self, data):
for key in data:
data[key] = filter(None, data[key])
if self.kind in ('literature', 'business', 'contacts') and data:
if 'screenplay/teleplay' in data:
data['screenplay-teleplay'] = data['screenplay/teleplay']
del data['screenplay/teleplay']
data = {self.kind: data}
else:
if self.kind == 'publicity':
if 'biography (print)' in data:
data['biography-print'] = data['biography (print)']
del data['biography (print)']
# Tech info.
for key in data.keys():
if key.startswith('film negative format'):
data['film negative format'] = data[key]
del data[key]
elif key.startswith('film length'):
data['film length'] = data[key]
del data[key]
return data
class DOMHTMLDvdParser(DOMParserBase):
"""Parser for the "dvd" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
dparser = DOMHTMLDvdParser()
result = dparser.parse(dvd_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='dvd',
path="//div[@class='base_layer']",
attrs=[Attribute(key=None,
multi=True,
path={
'title': "../table[1]//h3/text()",
'cover': "../table[1]//img/@src",
'region': ".//p[b='Region:']/text()",
'asin': ".//p[b='ASIN:']/text()",
'upc': ".//p[b='UPC:']/text()",
'rating': ".//p/b[starts-with(text(), 'Rating:')]/../img/@alt",
'certificate': ".//p[b='Certificate:']/text()",
'runtime': ".//p[b='Runtime:']/text()",
'label': ".//p[b='Label:']/text()",
'studio': ".//p[b='Studio:']/text()",
'release date': ".//p[b='Release Date:']/text()",
'dvd format': ".//p[b='DVD Format:']/text()",
'dvd features': ".//p[b='DVD Features: ']//text()",
'supplements': "..//div[span='Supplements']" \
"/following-sibling::div[1]//text()",
'review': "..//div[span='Review']/following-sibling::div[1]//text()",
'titles': "..//div[starts-with(text(), 'Titles in this Product')]" \
"/..//text()",
},
postprocess=lambda x: {
'title': (x.get('title') or u'').strip(),
'cover': (x.get('cover') or u'').strip(),
'region': (x.get('region') or u'').strip(),
'asin': (x.get('asin') or u'').strip(),
'upc': (x.get('upc') or u'').strip(),
'rating': (x.get('rating') or u'Not Rated').strip().replace('Rating: ', ''),
'certificate': (x.get('certificate') or u'').strip(),
'runtime': (x.get('runtime') or u'').strip(),
'label': (x.get('label') or u'').strip(),
'studio': (x.get('studio') or u'').strip(),
'release date': (x.get('release date') or u'').strip(),
'dvd format': (x.get('dvd format') or u'').strip(),
'dvd features': (x.get('dvd features') or u'').strip().replace('DVD Features: ', ''),
'supplements': (x.get('supplements') or u'').strip(),
'review': (x.get('review') or u'').strip(),
'titles in this product': (x.get('titles') or u'').strip().replace('Titles in this Product::', ''),
}
)])]
preprocessors = [
(re.compile('<p>(<table class="dvd_section" .*)</p>\s*<hr\s*/>', re.I),
r'<div class="_imdbpy">\1</div>'),
(re.compile('<p>(<div class\s*=\s*"base_layer")', re.I), r'\1'),
(re.compile('</p>\s*<p>(<div class="dvd_section")', re.I), r'\1'),
(re.compile('</div><div class="dvd_row(_alt)?">', re.I), r'::')
]
def postprocess_data(self, data):
if not data:
return data
dvds = data['dvd']
for dvd in dvds:
if dvd['cover'].find('noposter') != -1:
del dvd['cover']
for key in dvd.keys():
if not dvd[key]:
del dvd[key]
if 'supplements' in dvd:
dvd['supplements'] = dvd['supplements'].split('::')
return data
class DOMHTMLRecParser(DOMParserBase):
"""Parser for the "recommendations" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
rparser = HTMLRecParser()
result = rparser.parse(recommendations_html_string)
"""
_containsObjects = True
extractors = [Extractor(label='recommendations',
path="//td[@valign='middle'][1]",
attrs=Attribute(key='../../tr/td[1]//text()',
multi=True,
path={'title': ".//text()",
'movieID': ".//a/@href"}))]
def postprocess_data(self, data):
for key in data.keys():
n_key = key
n_keyl = n_key.lower()
if n_keyl == 'suggested by the database':
n_key = 'database'
elif n_keyl == 'imdb users recommend':
n_key = 'users'
data[n_key] = [Movie(title=x['title'],
movieID=analyze_imdbid(x['movieID']),
accessSystem=self._as, modFunct=self._modFunct)
for x in data[key]]
del data[key]
if data: return {'recommendations': data}
return data
class DOMHTMLNewsParser(DOMParserBase):
"""Parser for the "news" page of a given movie or person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
nwparser = DOMHTMLNewsParser()
result = nwparser.parse(news_html_string)
"""
_defGetRefs = True
extractors = [
Extractor(label='news',
path="//h2",
attrs=Attribute(key='news',
multi=True,
path={
'title': "./text()",
'fromdate': "../following-sibling::p[1]/small//text()",
# FIXME: sometimes (see The Matrix (1999)) <p> is found
# inside news text.
'body': "../following-sibling::p[2]//text()",
'link': "../..//a[text()='Permalink']/@href",
'fulllink': "../..//a[starts-with(text(), " \
"'See full article at')]/@href"
},
postprocess=lambda x: {
'title': x.get('title').strip(),
'date': x.get('fromdate').split('|')[0].strip(),
'from': x.get('fromdate').split('|')[1].replace('From ',
'').strip(),
'body': (x.get('body') or u'').strip(),
'link': _normalize_href(x.get('link')),
'full article link': _normalize_href(x.get('fulllink'))
}))
]
preprocessors = [
(re.compile('(<a name=[^>]+><h2>)', re.I), r'<div class="_imdbpy">\1'),
(re.compile('(<hr/>)', re.I), r'</div>\1'),
(re.compile('<p></p>', re.I), r'')
]
def postprocess_data(self, data):
if not data.has_key('news'):
return {}
for news in data['news']:
if news.has_key('full article link'):
if news['full article link'] is None:
del news['full article link']
return data
def _parse_review(x):
result = {}
title = x.get('title').strip()
if title[-1] == ':': title = title[:-1]
result['title'] = title
result['link'] = _normalize_href(x.get('link'))
kind = x.get('kind').strip()
if kind[-1] == ':': kind = kind[:-1]
result['review kind'] = kind
text = x.get('review').replace('\n\n', '||').replace('\n', ' ').split('||')
review = '\n'.join(text)
if x.get('author') is not None:
author = x.get('author').strip()
review = review.split(author)[0].strip()
result['review author'] = author[2:]
if x.get('item') is not None:
item = x.get('item').strip()
review = review[len(item):].strip()
review = "%s: %s" % (item, review)
result['review'] = review
return result
class DOMHTMLAmazonReviewsParser(DOMParserBase):
"""Parser for the "amazon reviews" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
arparser = DOMHTMLAmazonReviewsParser()
result = arparser.parse(amazonreviews_html_string)
"""
extractors = [
Extractor(label='amazon reviews',
group="//h3",
group_key="./a/text()",
group_key_normalize=lambda x: x[:-1],
path="./following-sibling::p[1]/span[@class='_review']",
attrs=Attribute(key=None,
multi=True,
path={
'title': "../preceding-sibling::h3[1]/a[1]/text()",
'link': "../preceding-sibling::h3[1]/a[1]/@href",
'kind': "./preceding-sibling::b[1]/text()",
'item': "./i/b/text()",
'review': ".//text()",
'author': "./i[starts-with(text(), '--')]/text()"
},
postprocess=_parse_review))
]
preprocessors = [
(re.compile('<p>\n(?!<b>)', re.I), r'\n'),
(re.compile('(\n</b>\n)', re.I), r'\1<span class="_review">'),
(re.compile('(</p>\n\n)', re.I), r'</span>\1'),
(re.compile('(\s\n)(<i><b>)', re.I), r'</span>\1<span class="_review">\2')
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
nd = []
for item in data.keys():
nd = nd + data[item]
return {'amazon reviews': nd}
def _parse_merchandising_link(x):
result = {}
link = x.get('link')
result['link'] = _normalize_href(link)
text = x.get('text')
if text is not None:
result['link-text'] = text.strip()
cover = x.get('cover')
if cover is not None:
result['cover'] = cover
description = x.get('description')
if description is not None:
shop = x.get('shop')
if shop is not None:
result['description'] = u'%s::%s' % (shop, description.strip())
else:
result['description'] = description.strip()
return result
class DOMHTMLSalesParser(DOMParserBase):
"""Parser for the "merchandising links" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = DOMHTMLSalesParser()
result = sparser.parse(sales_html_string)
"""
extractors = [
Extractor(label='shops',
group="//h5/a[@name]/..",
group_key="./a[1]/text()",
group_key_normalize=lambda x: x.lower(),
path=".//following-sibling::table[1]/" \
"/td[@class='w_rowtable_colshop']//tr[1]",
attrs=Attribute(key=None,
multi=True,
path={
'link': "./td[2]/a[1]/@href",
'text': "./td[1]/img[1]/@alt",
'cover': "./ancestor::td[1]/../td[1]"\
"/a[1]/img[1]/@src",
},
postprocess=_parse_merchandising_link)),
Extractor(label='others',
group="//span[@class='_info']/..",
group_key="./h5/a[1]/text()",
group_key_normalize=lambda x: x.lower(),
path="./span[@class='_info']",
attrs=Attribute(key=None,
multi=True,
path={
'link': "./preceding-sibling::a[1]/@href",
'shop': "./preceding-sibling::a[1]/text()",
'description': ".//text()",
},
postprocess=_parse_merchandising_link))
]
preprocessors = [
(re.compile('(<h5><a name=)', re.I), r'</div><div class="_imdbpy">\1'),
(re.compile('(</h5>\n<br/>\n)</div>', re.I), r'\1'),
(re.compile('(<br/><br/>\n)(\n)', re.I), r'\1</div>\2'),
(re.compile('(\n)(Search.*?)(</a>)(\n)', re.I), r'\3\1\2\4'),
(re.compile('(\n)(Search.*?)(\n)', re.I),
r'\1<span class="_info">\2</span>\3')
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
return {'merchandising links': data}
def _build_episode(x):
"""Create a Movie object for a given series' episode."""
episode_id = analyze_imdbid(x.get('link'))
episode_title = x.get('title')
e = Movie(movieID=episode_id, title=episode_title)
e['kind'] = u'episode'
oad = x.get('oad')
if oad:
e['original air date'] = oad.strip()
year = x.get('year')
if year is not None:
year = year[5:]
if year == 'unknown': year = u'????'
if year and year.isdigit():
year = int(year)
e['year'] = year
else:
if oad and oad[-4:].isdigit():
e['year'] = int(oad[-4:])
epinfo = x.get('episode')
if epinfo is not None:
season, episode = epinfo.split(':')[0].split(',')
e['season'] = int(season[7:])
e['episode'] = int(episode[8:])
else:
e['season'] = 'unknown'
e['episode'] = 'unknown'
plot = x.get('plot')
if plot:
e['plot'] = plot.strip()
return e
class DOMHTMLEpisodesParser(DOMParserBase):
"""Parser for the "episode list" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
eparser = DOMHTMLEpisodesParser()
result = eparser.parse(episodes_html_string)
"""
_containsObjects = True
kind = 'episodes list'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::span/strong[1]/text()"
def _init(self):
self.extractors = [
Extractor(label='series',
path="//html",
attrs=[Attribute(key='series title',
path=".//title/text()"),
Attribute(key='series movieID',
path=".//h1/a[@class='main']/@href",
postprocess=analyze_imdbid)
]),
Extractor(label='episodes',
group="//div[@class='_imdbpy']/h3",
group_key="./a/@name",
path=self._episodes_path,
attrs=Attribute(key=None,
multi=True,
path={
'link': "./a/@href",
'title': "./a/text()",
'year': "./preceding-sibling::a[1]/@name",
'episode': "./text()[1]",
'oad': self._oad_path,
'plot': "./following-sibling::text()[1]"
},
postprocess=_build_episode))]
if self.kind == 'episodes cast':
self.extractors += [
Extractor(label='cast',
group="//h4",
group_key="./text()[1]",
group_key_normalize=lambda x: x.strip(),
path="./following-sibling::table[1]//td[@class='nm']",
attrs=Attribute(key=None,
multi=True,
path={'person': "..//text()",
'link': "./a/@href",
'roleID': \
"../td[4]/div[@class='_imdbpyrole']/@roleid"},
postprocess=lambda x: \
build_person(x.get('person') or u'',
personID=analyze_imdbid(x.get('link')),
roleID=(x.get('roleID') or u'').split('/'),
accessSystem=self._as,
modFunct=self._modFunct)))
]
preprocessors = [
(re.compile('(<hr/>\n)(<h3>)', re.I),
r'</div>\1<div class="_imdbpy">\2'),
(re.compile('(</p>\n\n)</div>', re.I), r'\1'),
(re.compile('<h3>(.*?)</h3>', re.I), r'<h4>\1</h4>'),
(_reRolesMovie, _manageRoles),
(re.compile('(<br/> <br/>\n)(<hr/>)', re.I), r'\1</div>\2')
]
def postprocess_data(self, data):
# A bit extreme?
if not 'series title' in data: return {}
if not 'series movieID' in data: return {}
stitle = data['series title'].replace('- Episode list', '')
stitle = stitle.replace('- Episodes list', '')
stitle = stitle.replace('- Episode cast', '')
stitle = stitle.replace('- Episodes cast', '')
stitle = stitle.strip()
if not stitle: return {}
seriesID = data['series movieID']
if seriesID is None: return {}
series = Movie(title=stitle, movieID=str(seriesID),
accessSystem=self._as, modFunct=self._modFunct)
nd = {}
for key in data.keys():
if key.startswith('season-'):
season_key = key[7:]
try: season_key = int(season_key)
except: pass
nd[season_key] = {}
ep_counter = 1
for episode in data[key]:
if not episode: continue
episode_key = episode.get('episode')
if episode_key is None: continue
if not isinstance(episode_key, int):
episode_key = ep_counter
ep_counter += 1
cast_key = 'Season %s, Episode %s:' % (season_key,
episode_key)
if data.has_key(cast_key):
cast = data[cast_key]
for i in xrange(len(cast)):
cast[i].billingPos = i + 1
episode['cast'] = cast
episode['episode of'] = series
nd[season_key][episode_key] = episode
if len(nd) == 0:
return {}
return {'episodes': nd}
class DOMHTMLEpisodesCastParser(DOMHTMLEpisodesParser):
"""Parser for the "episodes cast" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
eparser = DOMHTMLEpisodesParser()
result = eparser.parse(episodes_html_string)
"""
kind = 'episodes cast'
_episodes_path = "..//h4"
_oad_path = "./following-sibling::b[1]/text()"
class DOMHTMLFaqsParser(DOMParserBase):
"""Parser for the "FAQ" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
fparser = DOMHTMLFaqsParser()
result = fparser.parse(faqs_html_string)
"""
_defGetRefs = True
# XXX: bsoup and lxml don't match (looks like a minor issue, anyway).
extractors = [
Extractor(label='faqs',
path="//div[@class='section']",
attrs=Attribute(key='faqs',
multi=True,
path={
'question': "./h3/a/span/text()",
'answer': "../following-sibling::div[1]//text()"
},
postprocess=lambda x: u'%s::%s' % (x.get('question').strip(),
'\n\n'.join(x.get('answer').replace(
'\n\n', '\n').strip().split('||')))))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||'),
(re.compile('<h4>(.*?)</h4>\n', re.I), r'||\1--'),
(re.compile('<span class="spoiler"><span>(.*?)</span></span>', re.I),
r'[spoiler]\1[/spoiler]')
]
class DOMHTMLAiringParser(DOMParserBase):
"""Parser for the "airing" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
aparser = DOMHTMLAiringParser()
result = aparser.parse(airing_html_string)
"""
_containsObjects = True
extractors = [
Extractor(label='series title',
path="//title",
attrs=Attribute(key='series title', path="./text()",
postprocess=lambda x: \
x.replace(' - TV schedule', u''))),
Extractor(label='series id',
path="//h1/a[@href]",
attrs=Attribute(key='series id', path="./@href")),
Extractor(label='tv airings',
path="//tr[@class]",
attrs=Attribute(key='airing',
multi=True,
path={
'date': "./td[1]//text()",
'time': "./td[2]//text()",
'channel': "./td[3]//text()",
'link': "./td[4]/a[1]/@href",
'title': "./td[4]//text()",
'season': "./td[5]//text()",
},
postprocess=lambda x: {
'date': x.get('date'),
'time': x.get('time'),
'channel': x.get('channel').strip(),
'link': x.get('link'),
'title': x.get('title'),
'season': (x.get('season') or '').strip()
}
))
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
seriesTitle = data['series title']
seriesID = analyze_imdbid(data['series id'])
if data.has_key('airing'):
for airing in data['airing']:
title = airing.get('title', '').strip()
if not title:
epsTitle = seriesTitle
if seriesID is None:
continue
epsID = seriesID
else:
epsTitle = '%s {%s}' % (data['series title'],
airing['title'])
epsID = analyze_imdbid(airing['link'])
e = Movie(title=epsTitle, movieID=epsID)
airing['episode'] = e
del airing['link']
del airing['title']
if not airing['season']:
del airing['season']
if 'series title' in data:
del data['series title']
if 'series id' in data:
del data['series id']
if 'airing' in data:
data['airing'] = filter(None, data['airing'])
if 'airing' not in data or not data['airing']:
return {}
return data
class DOMHTMLSynopsisParser(DOMParserBase):
"""Parser for the "synopsis" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = HTMLSynopsisParser()
result = sparser.parse(synopsis_html_string)
"""
extractors = [
Extractor(label='synopsis',
path="//div[@class='display'][not(@style)]",
attrs=Attribute(key='synopsis',
path=".//text()",
postprocess=lambda x: '\n\n'.join(x.strip().split('||'))))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
class DOMHTMLParentsGuideParser(DOMParserBase):
"""Parser for the "parents guide" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
pgparser = HTMLParentsGuideParser()
result = pgparser.parse(parentsguide_html_string)
"""
extractors = [
Extractor(label='parents guide',
group="//div[@class='section']",
group_key="./h3/a/span/text()",
group_key_normalize=lambda x: x.lower(),
path="../following-sibling::div[1]/p",
attrs=Attribute(key=None,
path=".//text()",
postprocess=lambda x: [t.strip().replace('\n', ' ')
for t in x.split('||') if t.strip()]))
]
preprocessors = [
(re.compile('<br/><br/>', re.I), r'||')
]
def postprocess_data(self, data):
data2 = {}
for key in data:
if data[key]:
data2[key] = data[key]
if not data2:
return {}
return {'parents guide': data2}
_OBJECTS = {
'movie_parser': ((DOMHTMLMovieParser,), None),
'plot_parser': ((DOMHTMLPlotParser,), None),
'movie_awards_parser': ((DOMHTMLAwardsParser,), None),
'taglines_parser': ((DOMHTMLTaglinesParser,), None),
'keywords_parser': ((DOMHTMLKeywordsParser,), None),
'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None),
'goofs_parser': ((DOMHTMLGoofsParser,), None),
'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None),
'trivia_parser': ((DOMHTMLTriviaParser,), None),
'soundtrack_parser': ((DOMHTMLSoundtrackParser,), {'kind': 'soundtrack'}),
'quotes_parser': ((DOMHTMLQuotesParser,), None),
'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None),
'ratings_parser': ((DOMHTMLRatingsParser,), None),
'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'externalrev_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'external reviews'}),
'newsgrouprev_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'newsgroup reviews'}),
'misclinks_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'misc links'}),
'soundclips_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'sound clips'}),
'videoclips_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'video clips'}),
'photosites_parser': ((DOMHTMLOfficialsitesParser,),
{'kind': 'photo sites'}),
'connections_parser': ((DOMHTMLConnectionParser,), None),
'tech_parser': ((DOMHTMLTechParser,), None),
'business_parser': ((DOMHTMLTechParser,),
{'kind': 'business', '_defGetRefs': 1}),
'literature_parser': ((DOMHTMLTechParser,), {'kind': 'literature'}),
'locations_parser': ((DOMHTMLLocationsParser,), None),
'dvd_parser': ((DOMHTMLDvdParser,), None),
'rec_parser': ((DOMHTMLRecParser,), None),
'news_parser': ((DOMHTMLNewsParser,), None),
'amazonrev_parser': ((DOMHTMLAmazonReviewsParser,), None),
'sales_parser': ((DOMHTMLSalesParser,), None),
'episodes_parser': ((DOMHTMLEpisodesParser,), None),
'episodes_cast_parser': ((DOMHTMLEpisodesCastParser,), None),
'eprating_parser': ((DOMHTMLEpisodesRatings,), None),
'movie_faqs_parser': ((DOMHTMLFaqsParser,), None),
'airing_parser': ((DOMHTMLAiringParser,), None),
'synopsis_parser': ((DOMHTMLSynopsisParser,), None),
'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None)
}
| [
[
8,
0,
0.0074,
0.0142,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0153,
0.0005,
0,
0.66,
0.0196,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0158,
0.0005,
0,
0.66... | [
"\"\"\"\nparser.http.movieParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse the\nIMDb pages on the akas.imdb.com server about a movie.\nE.g., for Brian De Palma's \"The Untouchables\", the referred\npages would be:\n combined details: http://akas.imdb.com/titl... |
"""
parser.http.personParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a person.
E.g., for "Mel Gibson" the referred pages would be:
categorized: http://akas.imdb.com/name/nm0000154/maindetails
biography: http://akas.imdb.com/name/nm0000154/bio
...and so on...
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.Movie import Movie
from imdb.utils import analyze_name, canonicalName, normalizeName, \
analyze_title, date_and_notes
from utils import build_movie, DOMParserBase, Attribute, Extractor, \
analyze_imdbid
from movieParser import _manageRoles
_reRoles = re.compile(r'(<li>.*? \.\.\.\. )(.*?)(</li>|<br>)',
re.I | re.M | re.S)
def build_date(date):
day = date.get('day')
year = date.get('year')
if day and year:
return "%s %s" % (day, year)
if day:
return day
if year:
return year
return ""
class DOMHTMLMaindetailsParser(DOMParserBase):
"""Parser for the "categorized" (maindetails) page of a given person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
cparser = DOMHTMLMaindetailsParser()
result = cparser.parse(categorized_html_string)
"""
_containsObjects = True
_birth_attrs = [Attribute(key='birth date',
path={
'day': ".//a[starts-with(@href, " \
"'/date/')]/text()",
'year': ".//a[starts-with(@href, " \
"'/search/name?birth_year=')]/text()"
},
postprocess=build_date),
Attribute(key='birth place',
path=".//a[starts-with(@href, " \
"'/search/name?birth_place=')]/text()")]
_death_attrs = [Attribute(key='death date',
path={
'day': ".//a[starts-with(@href, " \
"'/date/')]/text()",
'year': ".//a[starts-with(@href, " \
"'/search/name?death_year=')]/text()"
},
postprocess=build_date),
Attribute(key='death place',
path=".//a[starts-with(@href, " \
"'/search/name?death_place=')]/text()")]
_film_attrs = [Attribute(key=None,
multi=True,
path={
'link': "./b/a[1]/@href",
'title': "./b/a[1]/text()",
'notes': "./b/following-sibling::text()",
'year': "./span[@class='year_column']/text()",
'status': "./a[@class='in_production']/text()",
'rolesNoChar': './/br/following-sibling::text()',
'chrRoles': "./a[@imdbpyname]/@imdbpyname",
'roleID': "./a[starts-with(@href, '/character/')]/@href"
},
postprocess=lambda x:
build_movie(x.get('title') or u'',
year=x.get('year'),
movieID=analyze_imdbid(x.get('link') or u''),
rolesNoChar=(x.get('rolesNoChar') or u'').strip(),
chrRoles=(x.get('chrRoles') or u'').strip(),
additionalNotes=x.get('notes'),
roleID=(x.get('roleID') or u''),
status=x.get('status') or None))]
extractors = [
Extractor(label='name',
path="//h1[@class='header']",
attrs=Attribute(key='name',
path=".//text()",
postprocess=lambda x: analyze_name(x,
canonical=1))),
Extractor(label='birth info',
path="//div[h4='Born:']",
attrs=_birth_attrs),
Extractor(label='death info',
path="//div[h4='Died:']",
attrs=_death_attrs),
Extractor(label='headshot',
path="//td[@id='img_primary']/a",
attrs=Attribute(key='headshot',
path="./img/@src")),
Extractor(label='akas',
path="//div[h4='Alternate Names:']",
attrs=Attribute(key='akas',
path="./text()",
postprocess=lambda x: x.strip().split(' '))),
Extractor(label='filmography',
group="//div[starts-with(@id, 'filmo-head-')]",
group_key="./a[@name]/text()",
group_key_normalize=lambda x: x.lower().replace(': ', ' '),
path="./following-sibling::div[1]" \
"/div[starts-with(@class, 'filmo-row')]",
attrs=_film_attrs),
Extractor(label='indevelopment',
path="//div[starts-with(@class,'devitem')]",
attrs=Attribute(key='in development',
multi=True,
path={
'link': './a/@href',
'title': './a/text()'
},
postprocess=lambda x:
build_movie(x.get('title') or u'',
movieID=analyze_imdbid(x.get('link') or u''),
roleID=(x.get('roleID') or u'').split('/'),
status=x.get('status') or None)))
]
preprocessors = [('<div class="clear"/> </div>', ''),
('<br/>', '<br />'),
(re.compile(r'(<a href="/character/ch[0-9]{7}")>(.*?)</a>'),
r'\1 imdbpyname="\2@@">\2</a>')]
def postprocess_data(self, data):
for what in 'birth date', 'death date':
if what in data and not data[what]:
del data[what]
# XXX: the code below is for backwards compatibility
# probably could be removed
for key in data.keys():
if key.startswith('actor '):
if not data.has_key('actor'):
data['actor'] = []
data['actor'].extend(data[key])
del data[key]
if key.startswith('actress '):
if not data.has_key('actress'):
data['actress'] = []
data['actress'].extend(data[key])
del data[key]
if key.startswith('self '):
if not data.has_key('self'):
data['self'] = []
data['self'].extend(data[key])
del data[key]
if key == 'birth place':
data['birth notes'] = data[key]
del data[key]
if key == 'death place':
data['death notes'] = data[key]
del data[key]
return data
class DOMHTMLBioParser(DOMParserBase):
"""Parser for the "biography" page of a given person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
bioparser = DOMHTMLBioParser()
result = bioparser.parse(biography_html_string)
"""
_defGetRefs = True
_birth_attrs = [Attribute(key='birth date',
path={
'day': "./a[starts-with(@href, " \
"'/date/')]/text()",
'year': "./a[starts-with(@href, " \
"'/search/name?birth_year=')]/text()"
},
postprocess=build_date),
Attribute(key='birth notes',
path="./a[starts-with(@href, " \
"'/search/name?birth_place=')]/text()")]
_death_attrs = [Attribute(key='death date',
path={
'day': "./a[starts-with(@href, " \
"'/date/')]/text()",
'year': "./a[starts-with(@href, " \
"'/search/name?death_date=')]/text()"
},
postprocess=build_date),
Attribute(key='death notes',
path="./text()",
# TODO: check if this slicing is always correct
postprocess=lambda x: u''.join(x).strip()[2:])]
extractors = [
Extractor(label='headshot',
path="//a[@name='headshot']",
attrs=Attribute(key='headshot',
path="./img/@src")),
Extractor(label='birth info',
path="//div[h5='Date of Birth']",
attrs=_birth_attrs),
Extractor(label='death info',
path="//div[h5='Date of Death']",
attrs=_death_attrs),
Extractor(label='nick names',
path="//div[h5='Nickname']",
attrs=Attribute(key='nick names',
path="./text()",
joiner='|',
postprocess=lambda x: [n.strip().replace(' (',
'::(', 1) for n in x.split('|')
if n.strip()])),
Extractor(label='birth name',
path="//div[h5='Birth Name']",
attrs=Attribute(key='birth name',
path="./text()",
postprocess=lambda x: canonicalName(x.strip()))),
Extractor(label='height',
path="//div[h5='Height']",
attrs=Attribute(key='height',
path="./text()",
postprocess=lambda x: x.strip())),
Extractor(label='mini biography',
path="//div[h5='Mini Biography']",
attrs=Attribute(key='mini biography',
multi=True,
path={
'bio': "./p//text()",
'by': "./b/following-sibling::a/text()"
},
postprocess=lambda x: "%s::%s" % \
(x.get('bio').strip(),
(x.get('by') or u'').strip() or u'Anonymous'))),
Extractor(label='spouse',
path="//div[h5='Spouse']/table/tr",
attrs=Attribute(key='spouse',
multi=True,
path={
'name': "./td[1]//text()",
'info': "./td[2]//text()"
},
postprocess=lambda x: ("%s::%s" % \
(x.get('name').strip(),
(x.get('info') or u'').strip())).strip(':'))),
Extractor(label='trade mark',
path="//div[h5='Trade Mark']/p",
attrs=Attribute(key='trade mark',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='trivia',
path="//div[h5='Trivia']/p",
attrs=Attribute(key='trivia',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='quotes',
path="//div[h5='Personal Quotes']/p",
attrs=Attribute(key='quotes',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='salary',
path="//div[h5='Salary']/table/tr",
attrs=Attribute(key='salary history',
multi=True,
path={
'title': "./td[1]//text()",
'info': "./td[2]/text()",
},
postprocess=lambda x: "%s::%s" % \
(x.get('title').strip(),
x.get('info').strip()))),
Extractor(label='where now',
path="//div[h5='Where Are They Now']/p",
attrs=Attribute(key='where now',
multi=True,
path=".//text()",
postprocess=lambda x: x.strip())),
]
preprocessors = [
(re.compile('(<h5>)', re.I), r'</div><div class="_imdbpy">\1'),
(re.compile('(</table>\n</div>\s+)</div>', re.I + re.DOTALL), r'\1'),
(re.compile('(<div id="tn15bot">)'), r'</div>\1'),
(re.compile('\.<br><br>([^\s])', re.I), r'. \1')
]
def postprocess_data(self, data):
for what in 'birth date', 'death date':
if what in data and not data[what]:
del data[what]
return data
class DOMHTMLOtherWorksParser(DOMParserBase):
"""Parser for the "other works" and "agent" pages of a given person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
owparser = DOMHTMLOtherWorksParser()
result = owparser.parse(otherworks_html_string)
"""
_defGetRefs = True
kind = 'other works'
# XXX: looks like the 'agent' page is no more public.
extractors = [
Extractor(label='other works',
path="//h5[text()='Other works']/" \
"following-sibling::div[1]",
attrs=Attribute(key='self.kind',
path=".//text()",
postprocess=lambda x: x.strip().split('\n\n')))
]
preprocessors = [
(re.compile('(<h5>[^<]+</h5>)', re.I),
r'</div>\1<div class="_imdbpy">'),
(re.compile('(</table>\n</div>\s+)</div>', re.I), r'\1'),
(re.compile('(<div id="tn15bot">)'), r'</div>\1'),
(re.compile('<br/><br/>', re.I), r'\n\n')
]
def _build_episode(link, title, minfo, role, roleA, roleAID):
"""Build an Movie object for a given episode of a series."""
episode_id = analyze_imdbid(link)
notes = u''
minidx = minfo.find(' -')
# Sometimes, for some unknown reason, the role is left in minfo.
if minidx != -1:
slfRole = minfo[minidx+3:].lstrip()
minfo = minfo[:minidx].rstrip()
if slfRole.endswith(')'):
commidx = slfRole.rfind('(')
if commidx != -1:
notes = slfRole[commidx:]
slfRole = slfRole[:commidx]
if slfRole and role is None and roleA is None:
role = slfRole
eps_data = analyze_title(title)
eps_data['kind'] = u'episode'
# FIXME: it's wrong for multiple characters (very rare on tv series?).
if role is None:
role = roleA # At worse, it's None.
if role is None:
roleAID = None
if roleAID is not None:
roleAID = analyze_imdbid(roleAID)
e = Movie(movieID=episode_id, data=eps_data, currentRole=role,
roleID=roleAID, notes=notes)
# XXX: are we missing some notes?
# XXX: does it parse things as "Episode dated 12 May 2005 (12 May 2005)"?
if minfo.startswith('('):
pe = minfo.find(')')
if pe != -1:
date = minfo[1:pe]
if date != '????':
e['original air date'] = date
if eps_data.get('year', '????') == '????':
syear = date.split()[-1]
if syear.isdigit():
e['year'] = int(syear)
return e
class DOMHTMLSeriesParser(DOMParserBase):
"""Parser for the "by TV series" page of a given person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = DOMHTMLSeriesParser()
result = sparser.parse(filmoseries_html_string)
"""
_containsObjects = True
extractors = [
Extractor(label='series',
group="//div[@class='filmo']/span[1]",
group_key="./a[1]",
path="./following-sibling::ol[1]/li/a[1]",
attrs=Attribute(key=None,
multi=True,
path={
'link': "./@href",
'title': "./text()",
'info': "./following-sibling::text()",
'role': "./following-sibling::i[1]/text()",
'roleA': "./following-sibling::a[1]/text()",
'roleAID': "./following-sibling::a[1]/@href"
},
postprocess=lambda x: _build_episode(x.get('link'),
x.get('title'),
(x.get('info') or u'').strip(),
x.get('role'),
x.get('roleA'),
x.get('roleAID'))))
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
nd = {}
for key in data.keys():
dom = self.get_dom(key)
link = self.xpath(dom, "//a/@href")[0]
title = self.xpath(dom, "//a/text()")[0][1:-1]
series = Movie(movieID=analyze_imdbid(link),
data=analyze_title(title),
accessSystem=self._as, modFunct=self._modFunct)
nd[series] = []
for episode in data[key]:
# XXX: should we create a copy of 'series', to avoid
# circular references?
episode['episode of'] = series
nd[series].append(episode)
return {'episodes': nd}
class DOMHTMLPersonGenresParser(DOMParserBase):
"""Parser for the "by genre" and "by keywords" pages of a given person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
gparser = DOMHTMLPersonGenresParser()
result = gparser.parse(bygenre_html_string)
"""
kind = 'genres'
_containsObjects = True
extractors = [
Extractor(label='genres',
group="//b/a[@name]/following-sibling::a[1]",
group_key="./text()",
group_key_normalize=lambda x: x.lower(),
path="../../following-sibling::ol[1]/li//a[1]",
attrs=Attribute(key=None,
multi=True,
path={
'link': "./@href",
'title': "./text()",
'info': "./following-sibling::text()"
},
postprocess=lambda x: \
build_movie(x.get('title') + \
x.get('info').split('[')[0],
analyze_imdbid(x.get('link')))))
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
return {self.kind: data}
from movieParser import _parse_merchandising_link
class DOMHTMLPersonSalesParser(DOMParserBase):
"""Parser for the "merchandising links" page of a given person.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
sparser = DOMHTMLPersonSalesParser()
result = sparser.parse(sales_html_string)
"""
extractors = [
Extractor(label='merchandising links',
group="//span[@class='merch_title']",
group_key=".//text()",
path="./following-sibling::table[1]/" \
"/td[@class='w_rowtable_colshop']//tr[1]",
attrs=Attribute(key=None,
multi=True,
path={
'link': "./td[2]/a[1]/@href",
'text': "./td[1]/img[1]/@alt",
'cover': "./ancestor::td[1]/../" \
"td[1]/a[1]/img[1]/@src",
},
postprocess=_parse_merchandising_link)),
]
preprocessors = [
(re.compile('(<a name="[^"]+" )/>', re.I), r'\1></a>')
]
def postprocess_data(self, data):
if len(data) == 0:
return {}
return {'merchandising links': data}
from movieParser import DOMHTMLTechParser
from movieParser import DOMHTMLOfficialsitesParser
from movieParser import DOMHTMLAwardsParser
from movieParser import DOMHTMLNewsParser
_OBJECTS = {
'maindetails_parser': ((DOMHTMLMaindetailsParser,), None),
'bio_parser': ((DOMHTMLBioParser,), None),
'otherworks_parser': ((DOMHTMLOtherWorksParser,), None),
#'agent_parser': ((DOMHTMLOtherWorksParser,), {'kind': 'agent'}),
'person_officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
'person_awards_parser': ((DOMHTMLAwardsParser,), {'subject': 'name'}),
'publicity_parser': ((DOMHTMLTechParser,), {'kind': 'publicity'}),
'person_series_parser': ((DOMHTMLSeriesParser,), None),
'person_contacts_parser': ((DOMHTMLTechParser,), {'kind': 'contacts'}),
'person_genres_parser': ((DOMHTMLPersonGenresParser,), None),
'person_keywords_parser': ((DOMHTMLPersonGenresParser,),
{'kind': 'keywords'}),
'news_parser': ((DOMHTMLNewsParser,), None),
'sales_parser': ((DOMHTMLPersonSalesParser,), None)
}
| [
[
8,
0,
0.025,
0.0483,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0519,
0.0018,
0,
0.66,
0.05,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0537,
0.0018,
0,
0.66,
... | [
"\"\"\"\nparser.http.personParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse\nthe IMDb pages on the akas.imdb.com server about a person.\nE.g., for \"Mel Gibson\" the referred pages would be:\n categorized: http://akas.imdb.com/name/nm0000154/maindetails\n ... |
"""
parser.http.searchMovieParser module (imdb package).
This module provides the HTMLSearchMovieParser class (and the
search_movie_parser instance), used to parse the results of a search
for a given title.
E.g., for when searching for the title "the passion", the parsed
page would be:
http://akas.imdb.com/find?q=the+passion&tt=on&mx=20
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.utils import analyze_title, build_title
from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
class DOMBasicMovieParser(DOMParserBase):
"""Simply get the title of a movie and the imdbID.
It's used by the DOMHTMLSearchMovieParser class to return a result
for a direct match (when a search on IMDb results in a single
movie, the web server sends directly the movie page."""
# Stay generic enough to be used also for other DOMBasic*Parser classes.
_titleAttrPath = ".//text()"
_linkPath = "//link[@rel='canonical']"
_titleFunct = lambda self, x: analyze_title(x or u'')
def _init(self):
self.preprocessors += [('<span class="tv-extra">TV mini-series</span>',
'<span class="tv-extra">(mini)</span>')]
self.extractors = [Extractor(label='title',
path="//h1",
attrs=Attribute(key='title',
path=self._titleAttrPath,
postprocess=self._titleFunct)),
Extractor(label='link',
path=self._linkPath,
attrs=Attribute(key='link', path="./@href",
postprocess=lambda x: \
analyze_imdbid((x or u'').replace(
'http://pro.imdb.com', ''))
))]
# Remove 'More at IMDb Pro' links.
preprocessors = [(re.compile(r'<span class="pro-link".*?</span>'), ''),
(re.compile(r'<a href="http://ad.doubleclick.net.*?;id=(co[0-9]{7});'), r'<a href="http://pro.imdb.com/company/\1"></a>< a href="')]
def postprocess_data(self, data):
if not 'link' in data:
data = []
else:
link = data.pop('link')
if (link and data):
data = [(link, data)]
else:
data = []
return data
def custom_analyze_title(title):
"""Remove garbage notes after the (year), (year/imdbIndex) or (year) (TV)"""
# XXX: very crappy. :-(
nt = title.split(' ')[0]
if nt:
title = nt
if not title:
return {}
return analyze_title(title)
# Manage AKAs.
_reAKAStitles = re.compile(r'(?:aka) <em>"(.*?)(<br>|<\/td>)', re.I | re.M)
class DOMHTMLSearchMovieParser(DOMParserBase):
"""Parse the html page that the IMDb web server shows when the
"new search system" is used, for movies."""
_BaseParser = DOMBasicMovieParser
_notDirectHitTitle = '<title>imdb title'
_titleBuilder = lambda self, x: build_title(x)
_linkPrefix = '/title/tt'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'info': ".//text()",
#'akas': ".//div[@class='_imdbpyAKA']//text()"
'akas': ".//p[@class='find-aka']//text()"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link') or u''),
custom_analyze_title(x.get('info') or u''),
x.get('akas')
))]
extractors = [Extractor(label='search',
path="//td[3]/a[starts-with(@href, '/title/tt')]/..",
attrs=_attrs)]
def _init(self):
self.url = u''
def _reset(self):
self.url = u''
def preprocess_string(self, html_string):
if self._notDirectHitTitle in html_string[:1024].lower():
if self._linkPrefix == '/title/tt':
# Only for movies.
html_string = html_string.replace('(TV mini-series)', '(mini)')
html_string = html_string.replace('<p class="find-aka">',
'<p class="find-aka">::')
#html_string = _reAKAStitles.sub(
# r'<div class="_imdbpyAKA">\1::</div>\2', html_string)
return html_string
# Direct hit!
dbme = self._BaseParser(useModule=self._useModule)
res = dbme.parse(html_string, url=self.url)
if not res: return u''
res = res['data']
if not (res and res[0]): return u''
link = '%s%s' % (self._linkPrefix, res[0][0])
# # Tries to cope with companies for which links to pro.imdb.com
# # are missing.
# link = self.url.replace(imdbURL_base[:-1], '')
title = self._titleBuilder(res[0][1])
if not (link and title): return u''
link = link.replace('http://pro.imdb.com', '')
new_html = '<td></td><td></td><td><a href="%s">%s</a></td>' % (link,
title)
return new_html
def postprocess_data(self, data):
if not data.has_key('data'):
data['data'] = []
results = getattr(self, 'results', None)
if results is not None:
data['data'][:] = data['data'][:results]
# Horrible hack to support AKAs.
if data and data['data'] and len(data['data'][0]) == 3 and \
isinstance(data['data'][0], tuple):
for idx, datum in enumerate(data['data']):
if not isinstance(datum, tuple):
continue
if datum[2] is not None:
akas = filter(None, datum[2].split('::'))
if self._linkPrefix == '/title/tt':
akas = [a.replace('" - ', '::').rstrip() for a in akas]
akas = [a.replace('aka "', '', 1).lstrip() for a in akas]
datum[1]['akas'] = akas
data['data'][idx] = (datum[0], datum[1])
else:
data['data'][idx] = (datum[0], datum[1])
return data
def add_refs(self, data):
return data
_OBJECTS = {
'search_movie_parser': ((DOMHTMLSearchMovieParser,), None)
}
| [
[
8,
0,
0.0787,
0.1517,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1629,
0.0056,
0,
0.66,
0.125,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.1685,
0.0056,
0,
0.66,... | [
"\"\"\"\nparser.http.searchMovieParser module (imdb package).\n\nThis module provides the HTMLSearchMovieParser class (and the\nsearch_movie_parser instance), used to parse the results of a search\nfor a given title.\nE.g., for when searching for the title \"the passion\", the parsed\npage would be:",
"import re"... |
"""
parser.http.topBottomParser module (imdb package).
This module provides the classes (and the instances), used to parse the
lists of top 250 and bottom 100 movies.
E.g.:
http://akas.imdb.com/chart/top
http://akas.imdb.com/chart/bottom
Copyright 2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_title
from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
class DOMHTMLTop250Parser(DOMParserBase):
"""Parser for the "top 250" page.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = DOMHTMLTop250Parser()
result = tparser.parse(top250_html_string)
"""
label = 'top 250'
ranktext = 'top 250 rank'
def _init(self):
self.extractors = [Extractor(label=self.label,
path="//div[@id='main']//table//tr",
attrs=Attribute(key=None,
multi=True,
path={self.ranktext: "./td[1]//text()",
'rating': "./td[2]//text()",
'title': "./td[3]//text()",
'movieID': "./td[3]//a/@href",
'votes': "./td[4]//text()"
}))]
def postprocess_data(self, data):
if not data or self.label not in data:
return []
mlist = []
data = data[self.label]
# Avoid duplicates. A real fix, using XPath, is auspicabile.
# XXX: probably this is no more needed.
seenIDs = []
for d in data:
if 'movieID' not in d: continue
if self.ranktext not in d: continue
if 'title' not in d: continue
theID = analyze_imdbid(d['movieID'])
if theID is None:
continue
theID = str(theID)
if theID in seenIDs:
continue
seenIDs.append(theID)
minfo = analyze_title(d['title'])
try: minfo[self.ranktext] = int(d[self.ranktext].replace('.', ''))
except: pass
if 'votes' in d:
try: minfo['votes'] = int(d['votes'].replace(',', ''))
except: pass
if 'rating' in d:
try: minfo['rating'] = float(d['rating'])
except: pass
mlist.append((theID, minfo))
return mlist
class DOMHTMLBottom100Parser(DOMHTMLTop250Parser):
"""Parser for the "bottom 100" page.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
tparser = DOMHTMLBottom100Parser()
result = tparser.parse(bottom100_html_string)
"""
label = 'bottom 100'
ranktext = 'bottom 100 rank'
_OBJECTS = {
'top250_parser': ((DOMHTMLTop250Parser,), None),
'bottom100_parser': ((DOMHTMLBottom100Parser,), None)
}
| [
[
8,
0,
0.1226,
0.2358,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2547,
0.0094,
0,
0.66,
0.2,
896,
0,
1,
0,
0,
896,
0,
0
],
[
1,
0,
0.2642,
0.0094,
0,
0.66,
... | [
"\"\"\"\nparser.http.topBottomParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse the\nlists of top 250 and bottom 100 movies.\nE.g.:\n http://akas.imdb.com/chart/top\n http://akas.imdb.com/chart/bottom",
"from imdb.utils import analyze_title",
"from utils ... |
"""
parser.http.searchKeywordParser module (imdb package).
This module provides the HTMLSearchKeywordParser class (and the
search_company_parser instance), used to parse the results of a search
for a given keyword.
E.g., when searching for the keyword "alabama", the parsed page would be:
http://akas.imdb.com/find?s=kw;mx=20;q=alabama
Copyright 2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from utils import Extractor, Attribute, analyze_imdbid
from imdb.utils import analyze_title, analyze_company_name
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicKeywordParser(DOMBasicMovieParser):
"""Simply get the name of a keyword.
It's used by the DOMHTMLSearchKeywordParser class to return a result
for a direct match (when a search on IMDb results in a single
keyword, the web server sends directly the keyword page.
"""
# XXX: it's still to be tested!
# I'm not even sure there can be a direct hit, searching for keywords.
_titleFunct = lambda self, x: analyze_company_name(x or u'')
class DOMHTMLSearchKeywordParser(DOMHTMLSearchMovieParser):
"""Parse the html page that the IMDb web server shows when the
"new search system" is used, searching for keywords similar to
the one given."""
_BaseParser = DOMBasicKeywordParser
_notDirectHitTitle = '<title>imdb keyword'
_titleBuilder = lambda self, x: x
_linkPrefix = '/keyword/'
_attrs = [Attribute(key='data',
multi=True,
path="./a[1]/text()"
)]
extractors = [Extractor(label='search',
path="//td[3]/a[starts-with(@href, " \
"'/keyword/')]/..",
attrs=_attrs)]
def custom_analyze_title4kwd(title, yearNote, outline):
"""Return a dictionary with the needed info."""
title = title.strip()
if not title:
return {}
if yearNote:
yearNote = '%s)' % yearNote.split(' ')[0]
title = title + ' ' + yearNote
retDict = analyze_title(title)
if outline:
retDict['plot outline'] = outline
return retDict
class DOMHTMLSearchMovieKeywordParser(DOMHTMLSearchMovieParser):
"""Parse the html page that the IMDb web server shows when the
"new search system" is used, searching for movies with the given
keyword."""
_notDirectHitTitle = '<title>best'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'info': "./a[1]//text()",
'ynote': "./span[@class='desc']/text()",
'outline': "./span[@class='outline']//text()"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link') or u''),
custom_analyze_title4kwd(x.get('info') or u'',
x.get('ynote') or u'',
x.get('outline') or u'')
))]
extractors = [Extractor(label='search',
path="//td[3]/a[starts-with(@href, " \
"'/title/tt')]/..",
attrs=_attrs)]
_OBJECTS = {
'search_keyword_parser': ((DOMHTMLSearchKeywordParser,),
{'kind': 'keyword', '_basic_parser': DOMBasicKeywordParser}),
'search_moviekeyword_parser': ((DOMHTMLSearchMovieKeywordParser,), None)
}
| [
[
8,
0,
0.1171,
0.2252,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2432,
0.009,
0,
0.66,
0.125,
970,
0,
3,
0,
0,
970,
0,
0
],
[
1,
0,
0.2523,
0.009,
0,
0.66,
... | [
"\"\"\"\nparser.http.searchKeywordParser module (imdb package).\n\nThis module provides the HTMLSearchKeywordParser class (and the\nsearch_company_parser instance), used to parse the results of a search\nfor a given keyword.\nE.g., when searching for the keyword \"alabama\", the parsed page would be:\n http://ak... |
"""
parser.http.searchCompanyParser module (imdb package).
This module provides the HTMLSearchCompanyParser class (and the
search_company_parser instance), used to parse the results of a search
for a given company.
E.g., when searching for the name "Columbia Pictures", the parsed page would be:
http://akas.imdb.com/find?s=co;mx=20;q=Columbia+Pictures
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_company_name, build_company_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCompanyParser(DOMBasicMovieParser):
"""Simply get the name of a company and the imdbID.
It's used by the DOMHTMLSearchCompanyParser class to return a result
for a direct match (when a search on IMDb results in a single
company, the web server sends directly the company page.
"""
_titleFunct = lambda self, x: analyze_company_name(x or u'')
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
_BaseParser = DOMBasicCompanyParser
_notDirectHitTitle = '<title>imdb company'
_titleBuilder = lambda self, x: build_company_name(x)
_linkPrefix = '/company/co'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()",
'notes': "./text()[1]"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link')),
analyze_company_name(x.get('name')+(x.get('notes')
or u''), stripNotes=True)
))]
extractors = [Extractor(label='search',
path="//td[3]/a[starts-with(@href, " \
"'/company/co')]/..",
attrs=_attrs)]
_OBJECTS = {
'search_company_parser': ((DOMHTMLSearchCompanyParser,),
{'kind': 'company', '_basic_parser': DOMBasicCompanyParser})
}
| [
[
8,
0,
0.1901,
0.3662,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3944,
0.0141,
0,
0.66,
0.1667,
896,
0,
2,
0,
0,
896,
0,
0
],
[
1,
0,
0.4085,
0.0141,
0,
0.66... | [
"\"\"\"\nparser.http.searchCompanyParser module (imdb package).\n\nThis module provides the HTMLSearchCompanyParser class (and the\nsearch_company_parser instance), used to parse the results of a search\nfor a given company.\nE.g., when searching for the name \"Columbia Pictures\", the parsed page would be:\n ht... |
"""
parser.http.searchPersonParser module (imdb package).
This module provides the HTMLSearchPersonParser class (and the
search_person_parser instance), used to parse the results of a search
for a given person.
E.g., when searching for the name "Mel Gibson", the parsed page would be:
http://akas.imdb.com/find?q=Mel+Gibson&nm=on&mx=20
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
def _cleanName(n):
"""Clean the name in a title tag."""
if not n:
return u''
n = n.replace('Filmography by type for', '') # FIXME: temporary.
return n
class DOMBasicPersonParser(DOMBasicMovieParser):
"""Simply get the name of a person and the imdbID.
It's used by the DOMHTMLSearchPersonParser class to return a result
for a direct match (when a search on IMDb results in a single
person, the web server sends directly the movie page."""
_titleFunct = lambda self, x: analyze_name(_cleanName(x), canonical=1)
_reAKASp = re.compile(r'(?:aka|birth name) (<em>")(.*?)"(<br>|<\/em>|<\/td>)',
re.I | re.M)
class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser):
"""Parse the html page that the IMDb web server shows when the
"new search system" is used, for persons."""
_BaseParser = DOMBasicPersonParser
_notDirectHitTitle = '<title>imdb name'
_titleBuilder = lambda self, x: build_name(x, canonical=True)
_linkPrefix = '/name/nm'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()",
'index': "./text()[1]",
'akas': ".//div[@class='_imdbpyAKA']/text()"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link') or u''),
analyze_name((x.get('name') or u'') + \
(x.get('index') or u''),
canonical=1), x.get('akas')
))]
extractors = [Extractor(label='search',
path="//td[3]/a[starts-with(@href, '/name/nm')]/..",
attrs=_attrs)]
def preprocess_string(self, html_string):
if self._notDirectHitTitle in html_string[:1024].lower():
html_string = _reAKASp.sub(
r'\1<div class="_imdbpyAKA">\2::</div>\3',
html_string)
return DOMHTMLSearchMovieParser.preprocess_string(self, html_string)
_OBJECTS = {
'search_person_parser': ((DOMHTMLSearchPersonParser,),
{'kind': 'person', '_basic_parser': DOMBasicPersonParser})
}
| [
[
8,
0,
0.1467,
0.2826,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3043,
0.0109,
0,
0.66,
0.1111,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.3152,
0.0109,
0,
0.66... | [
"\"\"\"\nparser.http.searchPersonParser module (imdb package).\n\nThis module provides the HTMLSearchPersonParser class (and the\nsearch_person_parser instance), used to parse the results of a search\nfor a given person.\nE.g., when searching for the name \"Mel Gibson\", the parsed page would be:\n http://akas.i... |
"""
parser.http.bsouplxml.etree module (imdb.parser.http package).
This module adapts the beautifulsoup interface to lxml.etree module.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import _bsoup as BeautifulSoup
from _bsoup import Tag as Element
import bsoupxpath
# Not directly used by IMDbPY, but do not remove: it's used by IMDbPYKit,
# for example.
def fromstring(xml_string):
"""Return a DOM representation of the string."""
# We try to not use BeautifulSoup.BeautifulStoneSoup.XML_ENTITIES,
# for convertEntities.
return BeautifulSoup.BeautifulStoneSoup(xml_string,
convertEntities=None).findChild(True)
def tostring(element, encoding=None, pretty_print=False):
"""Return a string or unicode representation of an element."""
if encoding is unicode:
encoding = None
# For BeautifulSoup 3.1
#encArgs = {'prettyPrint': pretty_print}
#if encoding is not None:
# encArgs['encoding'] = encoding
#return element.encode(**encArgs)
return element.__str__(encoding, pretty_print)
def setattribute(tag, name, value):
tag[name] = value
def xpath(node, expr):
"""Apply an xpath expression to a node. Return a list of nodes."""
#path = bsoupxpath.Path(expr)
path = bsoupxpath.get_path(expr)
return path.apply(node)
# XXX: monkey patching the beautifulsoup tag class
class _EverythingIsNestable(dict):
""""Fake that every tag is nestable."""
def get(self, key, *args, **kwds):
return []
BeautifulSoup.BeautifulStoneSoup.NESTABLE_TAGS = _EverythingIsNestable()
BeautifulSoup.Tag.tag = property(fget=lambda self: self.name)
BeautifulSoup.Tag.attrib = property(fget=lambda self: self)
BeautifulSoup.Tag.text = property(fget=lambda self: self.string)
BeautifulSoup.Tag.set = setattribute
BeautifulSoup.Tag.getparent = lambda self: self.parent
BeautifulSoup.Tag.drop_tree = BeautifulSoup.Tag.extract
BeautifulSoup.Tag.xpath = xpath
# TODO: setting the text attribute for tags
| [
[
8,
0,
0.1533,
0.2933,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.32,
0.0133,
0,
0.66,
0.0625,
443,
0,
1,
0,
0,
443,
0,
0
],
[
1,
0,
0.3333,
0.0133,
0,
0.66,
... | [
"\"\"\"\nparser.http.bsouplxml.etree module (imdb.parser.http package).\n\nThis module adapts the beautifulsoup interface to lxml.etree module.\n\nCopyright 2008 H. Turgut Uyar <uyar@tekir.org>\n 2008 Davide Alberani <da@erlug.linux.it>",
"import _bsoup as BeautifulSoup",
"from _bsoup import Tag as Ele... |
"""
parser.http.bsoupxpath module (imdb.parser.http package).
This module provides XPath support for BeautifulSoup.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__author__ = 'H. Turgut Uyar <uyar@tekir.org>'
__docformat__ = 'restructuredtext'
import re
import string
import _bsoup as BeautifulSoup
# XPath related enumerations and constants
AXIS_ANCESTOR = 'ancestor'
AXIS_ATTRIBUTE = 'attribute'
AXIS_CHILD = 'child'
AXIS_DESCENDANT = 'descendant'
AXIS_FOLLOWING = 'following'
AXIS_FOLLOWING_SIBLING = 'following-sibling'
AXIS_PRECEDING_SIBLING = 'preceding-sibling'
AXES = (AXIS_ANCESTOR, AXIS_ATTRIBUTE, AXIS_CHILD, AXIS_DESCENDANT,
AXIS_FOLLOWING, AXIS_FOLLOWING_SIBLING, AXIS_PRECEDING_SIBLING)
XPATH_FUNCTIONS = ('starts-with', 'string-length')
def tokenize_path(path):
"""Tokenize a location path into location steps. Return the list of steps.
If two steps are separated by a double slash, the double slashes are part of
the second step. If they are separated by only one slash, the slash is not
included in any of the steps.
"""
# form a list of tuples that mark the start and end positions of steps
separators = []
last_position = 0
i = -1
in_string = False
while i < len(path) - 1:
i = i + 1
if path[i] == "'":
in_string = not in_string
if in_string:
# slashes within strings are not step separators
continue
if path[i] == '/':
if i > 0:
separators.append((last_position, i))
if (path[i+1] == '/'):
last_position = i
i = i + 1
else:
last_position = i + 1
separators.append((last_position, len(path)))
steps = []
for start, end in separators:
steps.append(path[start:end])
return steps
class Path:
"""A location path.
"""
def __init__(self, path, parse=True):
self.path = path
self.steps = []
if parse:
if (path[0] == '/') and (path[1] != '/'):
# if not on the descendant axis, remove the leading slash
path = path[1:]
steps = tokenize_path(path)
for step in steps:
self.steps.append(PathStep(step))
def apply(self, node):
"""Apply the path to a node. Return the resulting list of nodes.
Apply the steps in the path sequentially by sending the output of each
step as input to the next step.
"""
# FIXME: this should return a node SET, not a node LIST
# or at least a list with no duplicates
if self.path[0] == '/':
# for an absolute path, start from the root
if not isinstance(node, BeautifulSoup.Tag) \
or (node.name != '[document]'):
node = node.findParent('[document]')
nodes = [node]
for step in self.steps:
nodes = step.apply(nodes)
return nodes
class PathStep:
"""A location step in a location path.
"""
AXIS_PATTERN = r"""(%s)::|@""" % '|'.join(AXES)
NODE_TEST_PATTERN = r"""\w+(\(\))?"""
PREDICATE_PATTERN = r"""\[(.*?)\]"""
LOCATION_STEP_PATTERN = r"""(%s)?(%s)((%s)*)""" \
% (AXIS_PATTERN, NODE_TEST_PATTERN, PREDICATE_PATTERN)
_re_location_step = re.compile(LOCATION_STEP_PATTERN)
PREDICATE_NOT_PATTERN = r"""not\((.*?)\)"""
PREDICATE_AXIS_PATTERN = r"""(%s)?(%s)(='(.*?)')?""" \
% (AXIS_PATTERN, NODE_TEST_PATTERN)
PREDICATE_FUNCTION_PATTERN = r"""(%s)\(([^,]+(,\s*[^,]+)*)?\)(=(.*))?""" \
% '|'.join(XPATH_FUNCTIONS)
_re_predicate_not = re.compile(PREDICATE_NOT_PATTERN)
_re_predicate_axis = re.compile(PREDICATE_AXIS_PATTERN)
_re_predicate_function = re.compile(PREDICATE_FUNCTION_PATTERN)
def __init__(self, step):
self.step = step
if (step == '.') or (step == '..'):
return
if step[:2] == '//':
default_axis = AXIS_DESCENDANT
step = step[2:]
else:
default_axis = AXIS_CHILD
step_match = self._re_location_step.match(step)
# determine the axis
axis = step_match.group(1)
if axis is None:
self.axis = default_axis
elif axis == '@':
self.axis = AXIS_ATTRIBUTE
else:
self.axis = step_match.group(2)
self.soup_args = {}
self.index = None
self.node_test = step_match.group(3)
if self.node_test == 'text()':
self.soup_args['text'] = True
else:
self.soup_args['name'] = self.node_test
self.checkers = []
predicates = step_match.group(5)
if predicates is not None:
predicates = [p for p in predicates[1:-1].split('][') if p]
for predicate in predicates:
checker = self.__parse_predicate(predicate)
if checker is not None:
self.checkers.append(checker)
def __parse_predicate(self, predicate):
"""Parse the predicate. Return a callable that can be used to filter
nodes. Update `self.soup_args` to take advantage of BeautifulSoup search
features.
"""
try:
position = int(predicate)
if self.axis == AXIS_DESCENDANT:
return PredicateFilter('position', value=position)
else:
# use the search limit feature instead of a checker
self.soup_args['limit'] = position
self.index = position - 1
return None
except ValueError:
pass
if predicate == "last()":
self.index = -1
return None
negate = self._re_predicate_not.match(predicate)
if negate:
predicate = negate.group(1)
function_match = self._re_predicate_function.match(predicate)
if function_match:
name = function_match.group(1)
arguments = function_match.group(2)
value = function_match.group(4)
if value is not None:
value = function_match.group(5)
return PredicateFilter(name, arguments, value)
axis_match = self._re_predicate_axis.match(predicate)
if axis_match:
axis = axis_match.group(1)
if axis is None:
axis = AXIS_CHILD
elif axis == '@':
axis = AXIS_ATTRIBUTE
if axis == AXIS_ATTRIBUTE:
# use the attribute search feature instead of a checker
attribute_name = axis_match.group(3)
if axis_match.group(5) is not None:
attribute_value = axis_match.group(6)
elif not negate:
attribute_value = True
else:
attribute_value = None
if not self.soup_args.has_key('attrs'):
self.soup_args['attrs'] = {}
self.soup_args['attrs'][attribute_name] = attribute_value
return None
elif axis == AXIS_CHILD:
node_test = axis_match.group(3)
node_value = axis_match.group(6)
return PredicateFilter('axis', node_test, value=node_value,
negate=negate)
raise NotImplementedError("This predicate is not implemented")
def apply(self, nodes):
"""Apply the step to a list of nodes. Return the list of nodes for the
next step.
"""
if self.step == '.':
return nodes
elif self.step == '..':
return [node.parent for node in nodes]
result = []
for node in nodes:
if self.axis == AXIS_CHILD:
found = node.findAll(recursive=False, **self.soup_args)
elif self.axis == AXIS_DESCENDANT:
found = node.findAll(recursive=True, **self.soup_args)
elif self.axis == AXIS_ATTRIBUTE:
try:
found = [node[self.node_test]]
except KeyError:
found = []
elif self.axis == AXIS_FOLLOWING_SIBLING:
found = node.findNextSiblings(**self.soup_args)
elif self.axis == AXIS_PRECEDING_SIBLING:
# TODO: make sure that the result is reverse ordered
found = node.findPreviousSiblings(**self.soup_args)
elif self.axis == AXIS_FOLLOWING:
# find the last descendant of this node
last = node
while (not isinstance(last, BeautifulSoup.NavigableString)) \
and (len(last.contents) > 0):
last = last.contents[-1]
found = last.findAllNext(**self.soup_args)
elif self.axis == AXIS_ANCESTOR:
found = node.findParents(**self.soup_args)
# this should only be active if there is a position predicate
# and the axis is not 'descendant'
if self.index is not None:
if found:
if len(found) > self.index:
found = [found[self.index]]
else:
found = []
if found:
for checker in self.checkers:
found = filter(checker, found)
result.extend(found)
return result
class PredicateFilter:
"""A callable class for filtering nodes.
"""
def __init__(self, name, arguments=None, value=None, negate=False):
self.name = name
self.arguments = arguments
self.negate = negate
if name == 'position':
self.__filter = self.__position
self.value = value
elif name == 'axis':
self.__filter = self.__axis
self.node_test = arguments
self.value = value
elif name == 'starts-with':
self.__filter = self.__starts_with
args = map(string.strip, arguments.split(','))
if args[0][0] == '@':
self.arguments = (True, args[0][1:], args[1][1:-1])
else:
self.arguments = (False, args[0], args[1][1:-1])
elif name == 'string-length':
self.__filter = self.__string_length
args = map(string.strip, arguments.split(','))
if args[0][0] == '@':
self.arguments = (True, args[0][1:])
else:
self.arguments = (False, args[0])
self.value = int(value)
else:
raise NotImplementedError("This XPath function is not implemented")
def __call__(self, node):
if self.negate:
return not self.__filter(node)
else:
return self.__filter(node)
def __position(self, node):
if isinstance(node, BeautifulSoup.NavigableString):
actual_position = len(node.findPreviousSiblings(text=True)) + 1
else:
actual_position = len(node.findPreviousSiblings(node.name)) + 1
return actual_position == self.value
def __axis(self, node):
if self.node_test == 'text()':
return node.string == self.value
else:
children = node.findAll(self.node_test, recursive=False)
if len(children) > 0 and self.value is None:
return True
for child in children:
if child.string == self.value:
return True
return False
def __starts_with(self, node):
if self.arguments[0]:
# this is an attribute
attribute_name = self.arguments[1]
if node.has_key(attribute_name):
first = node[attribute_name]
return first.startswith(self.arguments[2])
elif self.arguments[1] == 'text()':
first = node.contents[0]
if isinstance(first, BeautifulSoup.NavigableString):
return first.startswith(self.arguments[2])
return False
def __string_length(self, node):
if self.arguments[0]:
# this is an attribute
attribute_name = self.arguments[1]
if node.has_key(attribute_name):
value = node[attribute_name]
else:
value = None
elif self.arguments[1] == 'text()':
value = node.string
if value is not None:
return len(value) == self.value
return False
_paths = {}
_steps = {}
def get_path(path):
"""Utility for eliminating repeated parsings of the same paths and steps.
"""
if not _paths.has_key(path):
p = Path(path, parse=False)
steps = tokenize_path(path)
for step in steps:
if not _steps.has_key(step):
_steps[step] = PathStep(step)
p.steps.append(_steps[step])
_paths[path] = p
return _paths[path]
| [
[
8,
0,
0.0279,
0.0533,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0584,
0.0025,
0,
0.66,
0.0476,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0609,
0.0025,
0,
0.66... | [
"\"\"\"\nparser.http.bsoupxpath module (imdb.parser.http package).\n\nThis module provides XPath support for BeautifulSoup.\n\nCopyright 2008 H. Turgut Uyar <uyar@tekir.org>\n\nThis program is free software; you can redistribute it and/or modify",
"__author__ = 'H. Turgut Uyar <uyar@tekir.org>'",
"__docformat__... |
"""
parser.http.bsouplxml.html module (imdb.parser.http package).
This module adapts the beautifulsoup interface to lxml.html module.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import _bsoup as BeautifulSoup
def fromstring(html_string):
"""Return a DOM representation of the string."""
return BeautifulSoup.BeautifulSoup(html_string,
convertEntities=BeautifulSoup.BeautifulSoup.HTML_ENTITIES
).findChild(True)
| [
[
8,
0,
0.371,
0.7097,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7742,
0.0323,
0,
0.66,
0.5,
443,
0,
1,
0,
0,
443,
0,
0
],
[
2,
0,
0.9355,
0.1613,
0,
0.66,
... | [
"\"\"\"\nparser.http.bsouplxml.html module (imdb.parser.http package).\n\nThis module adapts the beautifulsoup interface to lxml.html module.\n\nCopyright 2008 H. Turgut Uyar <uyar@tekir.org>\n 2008 Davide Alberani <da@erlug.linux.it>",
"import _bsoup as BeautifulSoup",
"def fromstring(html_string):\n ... |
"""
parser.http.companyParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a company.
E.g., for "Columbia Pictures [us]" the referred page would be:
main details: http://akas.imdb.com/company/co0071509/
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from utils import build_movie, Attribute, Extractor, DOMParserBase, \
analyze_imdbid
from imdb.utils import analyze_company_name
class DOMCompanyParser(DOMParserBase):
"""Parser for the main page of a given company.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
cparser = DOMCompanyParser()
result = cparser.parse(company_html_string)
"""
_containsObjects = True
extractors = [
Extractor(label='name',
path="//title",
attrs=Attribute(key='name',
path="./text()",
postprocess=lambda x: \
analyze_company_name(x, stripNotes=True))),
Extractor(label='filmography',
group="//b/a[@name]",
group_key="./text()",
group_key_normalize=lambda x: x.lower(),
path="../following-sibling::ol[1]/li",
attrs=Attribute(key=None,
multi=True,
path={
'link': "./a[1]/@href",
'title': "./a[1]/text()",
'year': "./text()[1]"
},
postprocess=lambda x:
build_movie(u'%s %s' % \
(x.get('title'), x.get('year').strip()),
movieID=analyze_imdbid(x.get('link') or u''),
_parsingCompany=True))),
]
preprocessors = [
(re.compile('(<b><a name=)', re.I), r'</p>\1')
]
def postprocess_data(self, data):
for key in data.keys():
new_key = key.replace('company', 'companies')
new_key = new_key.replace('other', 'miscellaneous')
new_key = new_key.replace('distributor', 'distributors')
if new_key != key:
data[new_key] = data[key]
del data[key]
return data
_OBJECTS = {
'company_main_parser': ((DOMCompanyParser,), None)
}
| [
[
8,
0,
0.1429,
0.2747,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2967,
0.011,
0,
0.66,
0.2,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.3132,
0.022,
0,
0.66,
... | [
"\"\"\"\nparser.http.companyParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse\nthe IMDb pages on the akas.imdb.com server about a company.\nE.g., for \"Columbia Pictures [us]\" the referred page would be:\n main details: http://akas.imdb.com/company/co0071509/... |
"""
parser.http.characterParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a character.
E.g., for "Jesse James" the referred pages would be:
main details: http://www.imdb.com/character/ch0000001/
biography: http://www.imdb.com/character/ch0000001/bio
...and so on...
Copyright 2007-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from utils import Attribute, Extractor, DOMParserBase, build_movie, \
analyze_imdbid
from personParser import DOMHTMLMaindetailsParser
from imdb.Movie import Movie
_personIDs = re.compile(r'/name/nm([0-9]{7})')
class DOMHTMLCharacterMaindetailsParser(DOMHTMLMaindetailsParser):
"""Parser for the "filmography" page of a given character.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
bparser = DOMHTMLCharacterMaindetailsParser()
result = bparser.parse(character_biography_html_string)
"""
_containsObjects = True
_film_attrs = [Attribute(key=None,
multi=True,
path={
'link': "./a[1]/@href",
'title': ".//text()",
'status': "./i/a//text()",
'roleID': "./a/@href"
},
postprocess=lambda x:
build_movie(x.get('title') or u'',
movieID=analyze_imdbid(x.get('link') or u''),
roleID=_personIDs.findall(x.get('roleID') or u''),
status=x.get('status') or None,
_parsingCharacter=True))]
extractors = [
Extractor(label='title',
path="//title",
attrs=Attribute(key='name',
path="./text()",
postprocess=lambda x: \
x.replace(' (Character)', '').replace(
'- Filmography by type', '').strip())),
Extractor(label='headshot',
path="//a[@name='headshot']",
attrs=Attribute(key='headshot',
path="./img/@src")),
Extractor(label='akas',
path="//div[h5='Alternate Names:']",
attrs=Attribute(key='akas',
path="./div//text()",
postprocess=lambda x: x.strip().split(' / '))),
Extractor(label='filmography',
path="//div[@class='filmo'][not(h5)]/ol/li",
attrs=_film_attrs),
Extractor(label='filmography sections',
group="//div[@class='filmo'][h5]",
group_key="./h5/a/text()",
group_key_normalize=lambda x: x.lower()[:-1],
path="./ol/li",
attrs=_film_attrs),
]
preprocessors = [
# Check that this doesn't cut "status"...
(re.compile(r'<br>(\.\.\.| ).+?</li>', re.I | re.M), '</li>')]
class DOMHTMLCharacterBioParser(DOMParserBase):
"""Parser for the "biography" page of a given character.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
bparser = DOMHTMLCharacterBioParser()
result = bparser.parse(character_biography_html_string)
"""
_defGetRefs = True
extractors = [
Extractor(label='introduction',
path="//div[@id='_intro']",
attrs=Attribute(key='introduction',
path=".//text()",
postprocess=lambda x: x.strip())),
Extractor(label='biography',
path="//span[@class='_biography']",
attrs=Attribute(key='biography',
multi=True,
path={
'info': "./preceding-sibling::h4[1]//text()",
'text': ".//text()"
},
postprocess=lambda x: u'%s: %s' % (
x.get('info').strip(),
x.get('text').replace('\n',
' ').replace('||', '\n\n').strip()))),
]
preprocessors = [
(re.compile('(<div id="swiki.2.3.1">)', re.I), r'\1<div id="_intro">'),
(re.compile('(<a name="history">)\s*(<table .*?</table>)',
re.I | re.DOTALL),
r'</div>\2\1</a>'),
(re.compile('(<a name="[^"]+">)(<h4>)', re.I), r'</span>\1</a>\2'),
(re.compile('(</h4>)</a>', re.I), r'\1<span class="_biography">'),
(re.compile('<br/><br/>', re.I), r'||'),
(re.compile('\|\|\n', re.I), r'</span>'),
]
class DOMHTMLCharacterQuotesParser(DOMParserBase):
"""Parser for the "quotes" page of a given character.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
qparser = DOMHTMLCharacterQuotesParser()
result = qparser.parse(character_quotes_html_string)
"""
_defGetRefs = True
extractors = [
Extractor(label='charquotes',
group="//h5",
group_key="./a/text()",
path="./following-sibling::div[1]",
attrs=Attribute(key=None,
path={'txt': ".//text()",
'movieID': ".//a[1]/@href"},
postprocess=lambda x: (analyze_imdbid(x['movieID']),
x['txt'].strip().replace(': ',
': ').replace(': ', ': ').split('||'))))
]
preprocessors = [
(re.compile('(</h5>)', re.I), r'\1<div>'),
(re.compile('\s*<br/><br/>\s*', re.I), r'||'),
(re.compile('\|\|\s*(<hr/>)', re.I), r'</div>\1'),
(re.compile('\s*<br/>\s*', re.I), r'::')
]
def postprocess_data(self, data):
if not data:
return {}
newData = {}
for title in data:
movieID, quotes = data[title]
if movieID is None:
movie = title
else:
movie = Movie(title=title, movieID=movieID,
accessSystem=self._as, modFunct=self._modFunct)
newData[movie] = [quote.split('::') for quote in quotes]
return {'quotes': newData}
from personParser import DOMHTMLSeriesParser
_OBJECTS = {
'character_main_parser': ((DOMHTMLCharacterMaindetailsParser,),
{'kind': 'character'}),
'character_series_parser': ((DOMHTMLSeriesParser,), None),
'character_bio_parser': ((DOMHTMLCharacterBioParser,), None),
'character_quotes_parser': ((DOMHTMLCharacterQuotesParser,), None)
}
| [
[
8,
0,
0.069,
0.133,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1429,
0.0049,
0,
0.66,
0.1,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.1502,
0.0099,
0,
0.66,
... | [
"\"\"\"\nparser.http.characterParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse\nthe IMDb pages on the akas.imdb.com server about a character.\nE.g., for \"Jesse James\" the referred pages would be:\n main details: http://www.imdb.com/character/ch0000001/\n ... |
"""
parser.http.searchCharacterParser module (imdb package).
This module provides the HTMLSearchCharacterParser class (and the
search_character_parser instance), used to parse the results of a search
for a given character.
E.g., when searching for the name "Jesse James", the parsed page would be:
http://akas.imdb.com/find?s=Characters;mx=20;q=Jesse+James
Copyright 2007-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCharacterParser(DOMBasicMovieParser):
"""Simply get the name of a character and the imdbID.
It's used by the DOMHTMLSearchCharacterParser class to return a result
for a direct match (when a search on IMDb results in a single
character, the web server sends directly the movie page."""
_titleFunct = lambda self, x: analyze_name(x or u'', canonical=False)
class DOMHTMLSearchCharacterParser(DOMHTMLSearchMovieParser):
_BaseParser = DOMBasicCharacterParser
_notDirectHitTitle = '<title>imdb search'
_titleBuilder = lambda self, x: build_name(x, canonical=False)
_linkPrefix = '/character/ch'
_attrs = [Attribute(key='data',
multi=True,
path={
'link': "./a[1]/@href",
'name': "./a[1]/text()"
},
postprocess=lambda x: (
analyze_imdbid(x.get('link') or u''),
{'name': x.get('name')}
))]
extractors = [Extractor(label='search',
path="//td[3]/a[starts-with(@href, " \
"'/character/ch')]/..",
attrs=_attrs)]
_OBJECTS = {
'search_character_parser': ((DOMHTMLSearchCharacterParser,),
{'kind': 'character', '_basic_parser': DOMBasicCharacterParser})
}
| [
[
8,
0,
0.1957,
0.3768,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4058,
0.0145,
0,
0.66,
0.1667,
896,
0,
2,
0,
0,
896,
0,
0
],
[
1,
0,
0.4203,
0.0145,
0,
0.66... | [
"\"\"\"\nparser.http.searchCharacterParser module (imdb package).\n\nThis module provides the HTMLSearchCharacterParser class (and the\nsearch_character_parser instance), used to parse the results of a search\nfor a given character.\nE.g., when searching for the name \"Jesse James\", the parsed page would be:\n ... |
"""
parser package (imdb package).
This package provides various parsers to access IMDb data (e.g.: a
parser for the web/http interface, a parser for the SQL database
interface, etc.).
So far, the http/httpThin, mobile and sql parsers are implemented.
Copyright 2004-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__all__ = ['http', 'mobile', 'sql']
| [
[
8,
0,
0.4464,
0.8571,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.9286,
0.0357,
0,
0.66,
1,
272,
0,
0,
0,
0,
0,
5,
0
]
] | [
"\"\"\"\nparser package (imdb package).\n\nThis package provides various parsers to access IMDb data (e.g.: a\nparser for the web/http interface, a parser for the SQL database\ninterface, etc.).\nSo far, the http/httpThin, mobile and sql parsers are implemented.",
"__all__ = ['http', 'mobile', 'sql']"
] |
"""
_logging module (imdb package).
This module provides the logging facilities used by the imdb package.
Copyright 2009-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warn': logging.WARNING,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
imdbpyLogger = logging.getLogger('imdbpy')
imdbpyStreamHandler = logging.StreamHandler()
imdbpyFormatter = logging.Formatter('%(asctime)s %(levelname)s [%(name)s]' \
' %(pathname)s:%(lineno)d: %(message)s')
imdbpyStreamHandler.setFormatter(imdbpyFormatter)
imdbpyLogger.addHandler(imdbpyStreamHandler)
def setLevel(level):
"""Set logging level for the main logger."""
level = level.lower().strip()
imdbpyLogger.setLevel(LEVELS.get(level, logging.NOTSET))
imdbpyLogger.log(imdbpyLogger.level, 'set logging threshold to "%s"',
logging.getLevelName(imdbpyLogger.level))
#imdbpyLogger.setLevel(logging.DEBUG)
# It can be an idea to have a single function to log and warn:
#import warnings
#def log_and_warn(msg, args=None, logger=None, level=None):
# """Log the message and issue a warning."""
# if logger is None:
# logger = imdbpyLogger
# if level is None:
# level = logging.WARNING
# if args is None:
# args = ()
# #warnings.warn(msg % args, stacklevel=0)
# logger.log(level, msg % args)
| [
[
8,
0,
0.1746,
0.3333,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3651,
0.0159,
0,
0.66,
0.125,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.4365,
0.0952,
0,
0.66... | [
"\"\"\"\n_logging module (imdb package).\n\nThis module provides the logging facilities used by the imdb package.\n\nCopyright 2009-2010 Davide Alberani <da@erlug.linux.it>\n\nThis program is free software; you can redistribute it and/or modify",
"import logging",
"LEVELS = {'debug': logging.DEBUG,\n 'in... |
"""
_exceptions module (imdb package).
This module provides the exception hierarchy used by the imdb package.
Copyright 2004-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
class IMDbError(Exception):
"""Base class for every exception raised by the imdb package."""
_logger = logging.getLogger('imdbpy')
def __init__(self, *args, **kwargs):
"""Initialize the exception and pass the message to the log system."""
# Every raised exception also dispatch a critical log.
self._logger.critical('%s exception raised; args: %s; kwds: %s',
self.__class__.__name__, args, kwargs,
exc_info=True)
super(IMDbError, self).__init__(*args, **kwargs)
class IMDbDataAccessError(IMDbError):
"""Exception raised when is not possible to access needed data."""
pass
class IMDbParserError(IMDbError):
"""Exception raised when an error occurred parsing the data."""
pass
| [
[
8,
0,
0.2391,
0.4565,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.5,
0.0217,
0,
0.66,
0.25,
715,
0,
1,
0,
0,
715,
0,
0
],
[
3,
0,
0.6739,
0.2391,
0,
0.66,
... | [
"\"\"\"\n_exceptions module (imdb package).\n\nThis module provides the exception hierarchy used by the imdb package.\n\nCopyright 2004-2009 Davide Alberani <da@erlug.linux.it>\n\nThis program is free software; you can redistribute it and/or modify",
"import logging",
"class IMDbError(Exception):\n \"\"\"Bas... |
"""
articles module (imdb package).
This module provides functions and data to handle in a smart way
articles (in various languages) at the beginning of movie titles.
Copyright 2009 Davide Alberani <da@erlug.linux.it>
2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# List of generic articles used when the language of the title is unknown (or
# we don't have information about articles in that language).
# XXX: Managing titles in a lot of different languages, a function to recognize
# an initial article can't be perfect; sometimes we'll stumble upon a short
# word that is an article in some language, but it's not in another; in these
# situations we have to choose if we want to interpret this little word
# as an article or not (remember that we don't know what the original language
# of the title was).
# Example: 'en' is (I suppose) an article in Some Language. Unfortunately it
# seems also to be a preposition in other languages (French?).
# Running a script over the whole list of titles (and aliases), I've found
# that 'en' is used as an article only 376 times, and as another thing 594
# times, so I've decided to _always_ consider 'en' as a non article.
#
# Here is a list of words that are _never_ considered as articles, complete
# with the cound of times they are used in a way or another:
# 'en' (376 vs 594), 'to' (399 vs 727), 'as' (198 vs 276), 'et' (79 vs 99),
# 'des' (75 vs 150), 'al' (78 vs 304), 'ye' (14 vs 70),
# 'da' (23 vs 298), "'n" (8 vs 12)
#
# I've left in the list 'i' (1939 vs 2151) and 'uno' (52 vs 56)
# I'm not sure what '-al' is, and so I've left it out...
#
# Generic list of articles in utf-8 encoding:
GENERIC_ARTICLES = ('the', 'la', 'a', 'die', 'der', 'le', 'el',
"l'", 'il', 'das', 'les', 'i', 'o', 'ein', 'un', 'de', 'los',
'an', 'una', 'las', 'eine', 'den', 'het', 'gli', 'lo', 'os',
'ang', 'oi', 'az', 'een', 'ha-', 'det', 'ta', 'al-',
'mga', "un'", 'uno', 'ett', 'dem', 'egy', 'els', 'eines',
'\xc3\x8f', '\xc3\x87', '\xc3\x94\xc3\xaf', '\xc3\x8f\xc3\xa9')
# Lists of articles separated by language. If possible, the list should
# be sorted by frequency (not very important, but...)
# If you want to add a list of articles for another language, mail it
# it at imdbpy-devel@lists.sourceforge.net; non-ascii articles must be utf-8
# encoded.
LANG_ARTICLES = {
'English': ('the', 'a', 'an'),
'Italian': ('la', 'le', "l'", 'il', 'i', 'un', 'una', 'gli', 'lo', "un'",
'uno'),
'Spanish': ('la', 'le', 'el', 'les', 'un', 'los', 'una', 'uno', 'unos',
'unas'),
'Portuguese': ('a', 'as', 'o', 'os', 'um', 'uns', 'uma', 'umas'),
'Turkish': (), # Some languages doesn't have articles.
}
LANG_ARTICLESget = LANG_ARTICLES.get
# Maps a language to countries where it is the main language.
# If you want to add an entry for another language or country, mail it at
# imdbpy-devel@lists.sourceforge.net .
_LANG_COUNTRIES = {
'English': ('USA', 'UK', 'Canada', 'Ireland', 'Australia'),
'Italian': ('Italy',),
'Spanish': ('Spain', 'Mexico'),
'Portuguese': ('Portugal', 'Brazil'),
'Turkish': ('Turkey',),
#'German': ('Germany', 'East Germany', 'West Germany'),
#'French': ('France'),
}
# Maps countries to their main language.
COUNTRY_LANG = {}
for lang in _LANG_COUNTRIES:
for country in _LANG_COUNTRIES[lang]:
COUNTRY_LANG[country] = lang
def toUnicode(articles):
"""Convert a list of articles utf-8 encoded to unicode strings."""
return tuple([art.decode('utf_8') for art in articles])
def toDicts(articles):
"""Given a list of utf-8 encoded articles, build two dictionary (one
utf-8 encoded and another one with unicode keys) for faster matches."""
uArticles = toUnicode(articles)
return dict([(x, x) for x in articles]), dict([(x, x) for x in uArticles])
def addTrailingSpace(articles):
"""From the given list of utf-8 encoded articles, return two
lists (one utf-8 encoded and another one in unicode) where a space
is added at the end - if the last char is not ' or -."""
_spArticles = []
_spUnicodeArticles = []
for article in articles:
if article[-1] not in ("'", '-'):
article += ' '
_spArticles.append(article)
_spUnicodeArticles.append(article.decode('utf_8'))
return _spArticles, _spUnicodeArticles
# Caches.
_ART_CACHE = {}
_SP_ART_CACHE = {}
def articlesDictsForLang(lang):
"""Return dictionaries of articles specific for the given language, or the
default one if the language is not known."""
if lang in _ART_CACHE:
return _ART_CACHE[lang]
artDicts = toDicts(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
_ART_CACHE[lang] = artDicts
return artDicts
def spArticlesForLang(lang):
"""Return lists of articles (plus optional spaces) specific for the
given language, or the default one if the language is not known."""
if lang in _SP_ART_CACHE:
return _SP_ART_CACHE[lang]
spArticles = addTrailingSpace(LANG_ARTICLESget(lang, GENERIC_ARTICLES))
_SP_ART_CACHE[lang] = spArticles
return spArticles
| [
[
8,
0,
0.0845,
0.162,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3627,
0.0423,
0,
0.66,
0.0769,
124,
0,
0,
0,
0,
0,
8,
0
],
[
14,
0,
0.4648,
0.0634,
0,
0.66,... | [
"\"\"\"\narticles module (imdb package).\n\nThis module provides functions and data to handle in a smart way\narticles (in various languages) at the beginning of movie titles.\n\nCopyright 2009 Davide Alberani <da@erlug.linux.it>\n 2009 H. Turgut Uyar <uyar@tekir.org>",
"GENERIC_ARTICLES = ('the', 'la',... |
"""
helpers module (imdb package).
This module provides functions not used directly by the imdb package,
but useful for IMDbPY-based programs.
Copyright 2006-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# XXX: find better names for the functions in this modules.
import re
from cgi import escape
import gettext
from gettext import gettext as _
gettext.textdomain('imdbpy')
# The modClearRefs can be used to strip names and titles references from
# the strings in Movie and Person objects.
from imdb.utils import modClearRefs, re_titleRef, re_nameRef, \
re_characterRef, _tagAttr, _Container, TAGS_TO_MODIFY
from imdb import IMDb, imdbURL_movie_base, imdbURL_person_base, \
imdbURL_character_base
import imdb.locale
from imdb.Movie import Movie
from imdb.Person import Person
from imdb.Character import Character
from imdb.Company import Company
from imdb.parser.http.utils import re_entcharrefssub, entcharrefs, \
subXMLRefs, subSGMLRefs
from imdb.parser.http.bsouplxml.etree import BeautifulSoup
# An URL, more or less.
_re_href = re.compile(r'(http://.+?)(?=\s|$)', re.I)
_re_hrefsub = _re_href.sub
def makeCgiPrintEncoding(encoding):
"""Make a function to pretty-print strings for the web."""
def cgiPrint(s):
"""Encode the given string using the %s encoding, and replace
chars outside the given charset with XML char references.""" % encoding
s = escape(s, quote=1)
if isinstance(s, unicode):
s = s.encode(encoding, 'xmlcharrefreplace')
return s
return cgiPrint
# cgiPrint uses the latin_1 encoding.
cgiPrint = makeCgiPrintEncoding('latin_1')
# Regular expression for %(varname)s substitutions.
re_subst = re.compile(r'%\((.+?)\)s')
# Regular expression for <if condition>....</if condition> clauses.
re_conditional = re.compile(r'<if\s+(.+?)\s*>(.+?)</if\s+\1\s*>')
def makeTextNotes(replaceTxtNotes):
"""Create a function useful to handle text[::optional_note] values.
replaceTxtNotes is a format string, which can include the following
values: %(text)s and %(notes)s.
Portions of the text can be conditionally excluded, if one of the
values is absent. E.g.: <if notes>[%(notes)s]</if notes> will be replaced
with '[notes]' if notes exists, or by an empty string otherwise.
The returned function is suitable be passed as applyToValues argument
of the makeObject2Txt function."""
def _replacer(s):
outS = replaceTxtNotes
if not isinstance(s, (unicode, str)):
return s
ssplit = s.split('::', 1)
text = ssplit[0]
# Used to keep track of text and note existence.
keysDict = {}
if text:
keysDict['text'] = True
outS = outS.replace('%(text)s', text)
if len(ssplit) == 2:
keysDict['notes'] = True
outS = outS.replace('%(notes)s', ssplit[1])
else:
outS = outS.replace('%(notes)s', u'')
def _excludeFalseConditionals(matchobj):
# Return an empty string if the conditional is false/empty.
if matchobj.group(1) in keysDict:
return matchobj.group(2)
return u''
while re_conditional.search(outS):
outS = re_conditional.sub(_excludeFalseConditionals, outS)
return outS
return _replacer
def makeObject2Txt(movieTxt=None, personTxt=None, characterTxt=None,
companyTxt=None, joiner=' / ',
applyToValues=lambda x: x, _recurse=True):
""""Return a function useful to pretty-print Movie, Person,
Character and Company instances.
*movieTxt* -- how to format a Movie object.
*personTxt* -- how to format a Person object.
*characterTxt* -- how to format a Character object.
*companyTxt* -- how to format a Company object.
*joiner* -- string used to join a list of objects.
*applyToValues* -- function to apply to values.
*_recurse* -- if True (default) manage only the given object.
"""
# Some useful defaults.
if movieTxt is None:
movieTxt = '%(long imdb title)s'
if personTxt is None:
personTxt = '%(long imdb name)s'
if characterTxt is None:
characterTxt = '%(long imdb name)s'
if companyTxt is None:
companyTxt = '%(long imdb name)s'
def object2txt(obj, _limitRecursion=None):
"""Pretty-print objects."""
# Prevent unlimited recursion.
if _limitRecursion is None:
_limitRecursion = 0
elif _limitRecursion > 5:
return u''
_limitRecursion += 1
if isinstance(obj, (list, tuple)):
return joiner.join([object2txt(o, _limitRecursion=_limitRecursion)
for o in obj])
elif isinstance(obj, dict):
# XXX: not exactly nice, neither useful, I fear.
return joiner.join([u'%s::%s' %
(object2txt(k, _limitRecursion=_limitRecursion),
object2txt(v, _limitRecursion=_limitRecursion))
for k, v in obj.items()])
objData = {}
if isinstance(obj, Movie):
objData['movieID'] = obj.movieID
outs = movieTxt
elif isinstance(obj, Person):
objData['personID'] = obj.personID
outs = personTxt
elif isinstance(obj, Character):
objData['characterID'] = obj.characterID
outs = characterTxt
elif isinstance(obj, Company):
objData['companyID'] = obj.companyID
outs = companyTxt
else:
return obj
def _excludeFalseConditionals(matchobj):
# Return an empty string if the conditional is false/empty.
condition = matchobj.group(1)
proceed = obj.get(condition) or getattr(obj, condition, None)
if proceed:
return matchobj.group(2)
else:
return u''
return matchobj.group(2)
while re_conditional.search(outs):
outs = re_conditional.sub(_excludeFalseConditionals, outs)
for key in re_subst.findall(outs):
value = obj.get(key) or getattr(obj, key, None)
if not isinstance(value, (unicode, str)):
if not _recurse:
if value:
value = unicode(value)
if value:
value = object2txt(value, _limitRecursion=_limitRecursion)
elif value:
value = applyToValues(unicode(value))
if not value:
value = u''
elif not isinstance(value, (unicode, str)):
value = unicode(value)
outs = outs.replace(u'%(' + key + u')s', value)
return outs
return object2txt
def makeModCGILinks(movieTxt, personTxt, characterTxt=None,
encoding='latin_1'):
"""Make a function used to pretty-print movies and persons refereces;
movieTxt and personTxt are the strings used for the substitutions.
movieTxt must contains %(movieID)s and %(title)s, while personTxt
must contains %(personID)s and %(name)s and characterTxt %(characterID)s
and %(name)s; characterTxt is optional, for backward compatibility."""
_cgiPrint = makeCgiPrintEncoding(encoding)
def modCGILinks(s, titlesRefs, namesRefs, characterRefs=None):
"""Substitute movies and persons references."""
if characterRefs is None: characterRefs = {}
# XXX: look ma'... more nested scopes! <g>
def _replaceMovie(match):
to_replace = match.group(1)
item = titlesRefs.get(to_replace)
if item:
movieID = item.movieID
to_replace = movieTxt % {'movieID': movieID,
'title': unicode(_cgiPrint(to_replace),
encoding,
'xmlcharrefreplace')}
return to_replace
def _replacePerson(match):
to_replace = match.group(1)
item = namesRefs.get(to_replace)
if item:
personID = item.personID
to_replace = personTxt % {'personID': personID,
'name': unicode(_cgiPrint(to_replace),
encoding,
'xmlcharrefreplace')}
return to_replace
def _replaceCharacter(match):
to_replace = match.group(1)
if characterTxt is None:
return to_replace
item = characterRefs.get(to_replace)
if item:
characterID = item.characterID
if characterID is None:
return to_replace
to_replace = characterTxt % {'characterID': characterID,
'name': unicode(_cgiPrint(to_replace),
encoding,
'xmlcharrefreplace')}
return to_replace
s = s.replace('<', '<').replace('>', '>')
s = _re_hrefsub(r'<a href="\1">\1</a>', s)
s = re_titleRef.sub(_replaceMovie, s)
s = re_nameRef.sub(_replacePerson, s)
s = re_characterRef.sub(_replaceCharacter, s)
return s
modCGILinks.movieTxt = movieTxt
modCGILinks.personTxt = personTxt
modCGILinks.characterTxt = characterTxt
return modCGILinks
# links to the imdb.com web site.
_movieTxt = '<a href="' + imdbURL_movie_base + 'tt%(movieID)s">%(title)s</a>'
_personTxt = '<a href="' + imdbURL_person_base + 'nm%(personID)s">%(name)s</a>'
_characterTxt = '<a href="' + imdbURL_character_base + \
'ch%(characterID)s">%(name)s</a>'
modHtmlLinks = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt,
characterTxt=_characterTxt)
modHtmlLinksASCII = makeModCGILinks(movieTxt=_movieTxt, personTxt=_personTxt,
characterTxt=_characterTxt,
encoding='ascii')
everyentcharrefs = entcharrefs.copy()
for k, v in {'lt':u'<','gt':u'>','amp':u'&','quot':u'"','apos':u'\''}.items():
everyentcharrefs[k] = v
everyentcharrefs['#%s' % ord(v)] = v
everyentcharrefsget = everyentcharrefs.get
re_everyentcharrefs = re.compile('&(%s|\#160|\#\d{1,5});' %
'|'.join(map(re.escape, everyentcharrefs)))
re_everyentcharrefssub = re_everyentcharrefs.sub
def _replAllXMLRef(match):
"""Replace the matched XML reference."""
ref = match.group(1)
value = everyentcharrefsget(ref)
if value is None:
if ref[0] == '#':
return unichr(int(ref[1:]))
else:
return ref
return value
def subXMLHTMLSGMLRefs(s):
"""Return the given string with XML/HTML/SGML entity and char references
replaced."""
return re_everyentcharrefssub(_replAllXMLRef, s)
def sortedSeasons(m):
"""Return a sorted list of seasons of the given series."""
seasons = m.get('episodes', {}).keys()
seasons.sort()
return seasons
def sortedEpisodes(m, season=None):
"""Return a sorted list of episodes of the given series,
considering only the specified season(s) (every season, if None)."""
episodes = []
seasons = season
if season is None:
seasons = sortedSeasons(m)
else:
if not isinstance(season, (tuple, list)):
seasons = [season]
for s in seasons:
eps_indx = m.get('episodes', {}).get(s, {}).keys()
eps_indx.sort()
for e in eps_indx:
episodes.append(m['episodes'][s][e])
return episodes
# Idea and portions of the code courtesy of none none (dclist at gmail.com)
_re_imdbIDurl = re.compile(r'\b(nm|tt|ch|co)([0-9]{7})\b')
def get_byURL(url, info=None, args=None, kwds=None):
"""Return a Movie, Person, Character or Company object for the given URL;
info is the info set to retrieve, args and kwds are respectively a list
and a dictionary or arguments to initialize the data access system.
Returns None if unable to correctly parse the url; can raise
exceptions if unable to retrieve the data."""
if args is None: args = []
if kwds is None: kwds = {}
ia = IMDb(*args, **kwds)
match = _re_imdbIDurl.search(url)
if not match:
return None
imdbtype = match.group(1)
imdbID = match.group(2)
if imdbtype == 'tt':
return ia.get_movie(imdbID, info=info)
elif imdbtype == 'nm':
return ia.get_person(imdbID, info=info)
elif imdbtype == 'ch':
return ia.get_character(imdbID, info=info)
elif imdbtype == 'co':
return ia.get_company(imdbID, info=info)
return None
# Idea and portions of code courtesy of Basil Shubin.
# Beware that these information are now available directly by
# the Movie/Person/Character instances.
def fullSizeCoverURL(obj):
"""Given an URL string or a Movie, Person or Character instance,
returns an URL to the full-size version of the cover/headshot,
or None otherwise. This function is obsolete: the same information
are available as keys: 'full-size cover url' and 'full-size headshot',
respectively for movies and persons/characters."""
if isinstance(obj, Movie):
coverUrl = obj.get('cover url')
elif isinstance(obj, (Person, Character)):
coverUrl = obj.get('headshot')
else:
coverUrl = obj
if not coverUrl:
return None
return _Container._re_fullsizeURL.sub('', coverUrl)
def keyToXML(key):
"""Return a key (the ones used to access information in Movie and
other classes instances) converted to the style of the XML output."""
return _tagAttr(key, '')[0]
def translateKey(key):
"""Translate a given key."""
return _(keyToXML(key))
# Maps tags to classes.
_MAP_TOP_OBJ = {
'person': Person,
'movie': Movie,
'character': Character,
'company': Company
}
# Tags to be converted to lists.
_TAGS_TO_LIST = dict([(x[0], None) for x in TAGS_TO_MODIFY.values()])
_TAGS_TO_LIST.update(_MAP_TOP_OBJ)
def tagToKey(tag):
"""Return the name of the tag, taking it from the 'key' attribute,
if present."""
keyAttr = tag.get('key')
if keyAttr:
if tag.get('keytype') == 'int':
keyAttr = int(keyAttr)
return keyAttr
return tag.name
def _valueWithType(tag, tagValue):
"""Return tagValue, handling some type conversions."""
tagType = tag.get('type')
if tagType == 'int':
tagValue = int(tagValue)
elif tagType == 'float':
tagValue = float(tagValue)
return tagValue
# Extra tags to get (if values were not already read from title/name).
_titleTags = ('imdbindex', 'kind', 'year')
_nameTags = ('imdbindex')
_companyTags = ('imdbindex', 'country')
def parseTags(tag, _topLevel=True, _as=None, _infoset2keys=None,
_key2infoset=None):
"""Recursively parse a tree of tags."""
# The returned object (usually a _Container subclass, but it can
# be a string, an int, a float, a list or a dictionary).
item = None
if _infoset2keys is None:
_infoset2keys = {}
if _key2infoset is None:
_key2infoset = {}
name = tagToKey(tag)
firstChild = tag.find(recursive=False)
tagStr = (tag.string or u'').strip()
if not tagStr and name == 'item':
# Handles 'item' tags containing text and a 'notes' sub-tag.
tagContent = tag.contents[0]
if isinstance(tagContent, BeautifulSoup.NavigableString):
tagStr = (unicode(tagContent) or u'').strip()
tagType = tag.get('type')
infoset = tag.get('infoset')
if infoset:
_key2infoset[name] = infoset
_infoset2keys.setdefault(infoset, []).append(name)
# Here we use tag.name to avoid tags like <item title="company">
if tag.name in _MAP_TOP_OBJ:
# One of the subclasses of _Container.
item = _MAP_TOP_OBJ[name]()
itemAs = tag.get('access-system')
if itemAs:
if not _as:
_as = itemAs
else:
itemAs = _as
item.accessSystem = itemAs
tagsToGet = []
theID = tag.get('id')
if name == 'movie':
item.movieID = theID
tagsToGet = _titleTags
theTitle = tag.find('title', recursive=False)
if tag.title:
item.set_title(tag.title.string)
tag.title.extract()
else:
if name == 'person':
item.personID = theID
tagsToGet = _nameTags
theName = tag.find('long imdb canonical name', recursive=False)
if not theName:
theName = tag.find('name', recursive=False)
elif name == 'character':
item.characterID = theID
tagsToGet = _nameTags
theName = tag.find('name', recursive=False)
elif name == 'company':
item.companyID = theID
tagsToGet = _companyTags
theName = tag.find('name', recursive=False)
if theName:
item.set_name(theName.string)
if theName:
theName.extract()
for t in tagsToGet:
if t in item.data:
continue
dataTag = tag.find(t, recursive=False)
if dataTag:
item.data[tagToKey(dataTag)] = _valueWithType(dataTag,
dataTag.string)
if tag.notes:
item.notes = tag.notes.string
tag.notes.extract()
episodeOf = tag.find('episode-of', recursive=False)
if episodeOf:
item.data['episode of'] = parseTags(episodeOf, _topLevel=False,
_as=_as, _infoset2keys=_infoset2keys,
_key2infoset=_key2infoset)
episodeOf.extract()
cRole = tag.find('current-role', recursive=False)
if cRole:
cr = parseTags(cRole, _topLevel=False, _as=_as,
_infoset2keys=_infoset2keys, _key2infoset=_key2infoset)
item.currentRole = cr
cRole.extract()
# XXX: big assumption, here. What about Movie instances used
# as keys in dictionaries? What about other keys (season and
# episode number, for example?)
if not _topLevel:
#tag.extract()
return item
_adder = lambda key, value: item.data.update({key: value})
elif tagStr:
if tag.notes:
notes = (tag.notes.string or u'').strip()
if notes:
tagStr += u'::%s' % notes
else:
tagStr = _valueWithType(tag, tagStr)
return tagStr
elif firstChild:
firstChildName = tagToKey(firstChild)
if firstChildName in _TAGS_TO_LIST:
item = []
_adder = lambda key, value: item.append(value)
else:
item = {}
_adder = lambda key, value: item.update({key: value})
else:
item = {}
_adder = lambda key, value: item.update({name: value})
for subTag in tag(recursive=False):
subTagKey = tagToKey(subTag)
# Exclude dinamically generated keys.
if tag.name in _MAP_TOP_OBJ and subTagKey in item._additional_keys():
continue
subItem = parseTags(subTag, _topLevel=False, _as=_as,
_infoset2keys=_infoset2keys, _key2infoset=_key2infoset)
if subItem:
_adder(subTagKey, subItem)
if _topLevel and name in _MAP_TOP_OBJ:
# Add information about 'info sets', but only to the top-level object.
item.infoset2keys = _infoset2keys
item.key2infoset = _key2infoset
item.current_info = _infoset2keys.keys()
return item
def parseXML(xml):
"""Parse a XML string, returning an appropriate object (usually an
instance of a subclass of _Container."""
xmlObj = BeautifulSoup.BeautifulStoneSoup(xml,
convertEntities=BeautifulSoup.BeautifulStoneSoup.XHTML_ENTITIES)
if xmlObj:
mainTag = xmlObj.find()
if mainTag:
return parseTags(mainTag)
return None
| [
[
8,
0,
0.021,
0.0401,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0474,
0.0018,
0,
0.66,
0.0192,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0493,
0.0018,
0,
0.66,... | [
"\"\"\"\nhelpers module (imdb package).\n\nThis module provides functions not used directly by the imdb package,\nbut useful for IMDbPY-based programs.\n\nCopyright 2006-2010 Davide Alberani <da@erlug.linux.it>",
"import re",
"from cgi import escape",
"import gettext",
"from gettext import gettext as _",
... |
"""
company module (imdb package).
This module provides the company class, used to store information about
a given company.
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_company_name, build_company_name, \
flatten, _Container, cmpCompanies
class Company(_Container):
"""A company.
Every information about a company can be accessed as:
companyObject['information']
to get a list of the kind of information stored in a
company object, use the keys() method; some useful aliases
are defined (as "also known as" for the "akas" key);
see the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main',)
# Aliases for some not-so-intuitive keys.
keys_alias = {
'distributor': 'distributors',
'special effects company': 'special effects companies',
'other company': 'miscellaneous companies',
'miscellaneous company': 'miscellaneous companies',
'other companies': 'miscellaneous companies',
'misc companies': 'miscellaneous companies',
'misc company': 'miscellaneous companies',
'production company': 'production companies'}
keys_tomodify_list = ()
cmpFunct = cmpCompanies
def _init(self, **kwds):
"""Initialize a company object.
*companyID* -- the unique identifier for the company.
*name* -- the name of the company, if not in the data dictionary.
*myName* -- the nickname you use for this company.
*myID* -- your personal id for this company.
*data* -- a dictionary used to initialize the object.
*notes* -- notes about the given company.
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to companies.
*modFunct* -- function called returning text fields.
"""
name = kwds.get('name')
if name and not self.data.has_key('name'):
self.set_name(name)
self.companyID = kwds.get('companyID', None)
self.myName = kwds.get('myName', u'')
def _reset(self):
"""Reset the company object."""
self.companyID = None
self.myName = u''
def set_name(self, name):
"""Set the name of the company."""
# XXX: convert name to unicode, if it's a plain string?
# Company diverges a bit from other classes, being able
# to directly handle its "notes". AND THAT'S PROBABLY A BAD IDEA!
oname = name = name.strip()
notes = u''
if name.endswith(')'):
fparidx = name.find('(')
if fparidx != -1:
notes = name[fparidx:]
name = name[:fparidx].rstrip()
if self.notes:
name = oname
d = analyze_company_name(name)
self.data.update(d)
if notes and not self.notes:
self.notes = notes
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
if self.data.has_key('name'):
return ['long imdb name']
return []
def _getitem(self, key):
"""Handle special keys."""
## XXX: can a company have an imdbIndex?
if self.data.has_key('name'):
if key == 'long imdb name':
return build_company_name(self.data)
return None
def getID(self):
"""Return the companyID."""
return self.companyID
def __nonzero__(self):
"""The company is "false" if the self.data does not contain a name."""
# XXX: check the name and the companyID?
if self.data.get('name'): return 1
return 0
def __contains__(self, item):
"""Return true if this company and the given Movie are related."""
from Movie import Movie
if isinstance(item, Movie):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m):
return 1
return 0
def isSameName(self, other):
"""Return true if two company have the same name
and/or companyID."""
if not isinstance(other, self.__class__):
return 0
if self.data.has_key('name') and \
other.data.has_key('name') and \
build_company_name(self.data) == \
build_company_name(other.data):
return 1
if self.accessSystem == other.accessSystem and \
self.companyID is not None and \
self.companyID == other.companyID:
return 1
return 0
isSameCompany = isSameName
def __deepcopy__(self, memo):
"""Return a deep copy of a company instance."""
c = Company(name=u'', companyID=self.companyID,
myName=self.myName, myID=self.myID,
data=deepcopy(self.data, memo),
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
c.current_info = list(self.current_info)
c.set_mod_funct(self.modFunct)
return c
def __repr__(self):
"""String representation of a Company object."""
r = '<Company id:%s[%s] name:_%s_>' % (self.companyID,
self.accessSystem,
self.get('long imdb name'))
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short name."""
return self.get('name', u'').encode('utf_8', 'replace')
def __unicode__(self):
"""Simply print the short title."""
return self.get('name', u'')
def summary(self):
"""Return a string with a pretty-printed summary for the company."""
if not self: return u''
s = u'Company\n=======\nName: %s\n' % \
self.get('name', u'')
for k in ('distributor', 'production company', 'miscellaneous company',
'special effects company'):
d = self.get(k, [])[:5]
if not d: continue
s += u'Last movies from this company (%s): %s.\n' % \
(k, u'; '.join([x.get('long imdb title', u'') for x in d]))
return s
| [
[
8,
0,
0.059,
0.1128,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1231,
0.0051,
0,
0.66,
0.3333,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.1359,
0.0103,
0,
0.66,... | [
"\"\"\"\ncompany module (imdb package).\n\nThis module provides the company class, used to store information about\na given company.\n\nCopyright 2008-2009 Davide Alberani <da@erlug.linux.it>",
"from copy import deepcopy",
"from imdb.utils import analyze_company_name, build_company_name, \\\n ... |
"""
topMovies.py
Prints top 10 movies, by ratings.
"""
import sys
# Import the IMDbPY package.
try:
import imdb
except ImportError:
print 'You need to install the IMDbPY package!'
sys.exit(1)
def unic(string):
try:
print string
except UnicodeEncodeError:
print 'bad movie title'
i = imdb.IMDb()
top250 = i.get_top250_movies()
print ''
print 'top 250 movies'
print 'rating\tvotes\ttitle'
for movie in top250:
movie.get('title')
unic('%s\t%s\t%s' % (movie.get('rating'), movie.get('votes'),
movie['long imdb title']))
| [
[
8,
0,
0.0882,
0.1471,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2059,
0.0294,
0,
0.66,
0.1111,
509,
0,
1,
0,
0,
509,
0,
0
],
[
7,
0,
0.3529,
0.1471,
0,
0.66... | [
"\"\"\"\ntopMovies.py\n\nPrints top 10 movies, by ratings.\n\"\"\"",
"import sys",
"try:\n import imdb\nexcept ImportError:\n print('You need to install the IMDbPY package!')\n sys.exit(1)",
" import imdb",
" print('You need to install the IMDbPY package!')",
" sys.exit(1)",
"def unic(... |
'''
Created on 21/04/2011
@author: Eran_Z
'''
| [
[
8,
0,
0.6,
1,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
]
] | [
"'''\nCreated on 21/04/2011\n\n@author: Eran_Z\n'''"
] |
'''
Created on 29/03/2011
@author: Eran_Z
Scoring
'''
from search_m import searchSingle, searchTogether
from util_m import sum, BadMovie
#Helper functions:
def __normalizedSigmaMutualWeightHelper(w, ci, wi):
ss = searchSingle(w)
st = searchTogether(w, ci)
#return 0 if ss < st else st*wi*1.0/ss
if ss < st:
raise BadMovie
return st*wi*1.0/ss
def __normalizedSigmaMutualWeight(w, context, weights):
try:
return reduce(sum, map(lambda i:__normalizedSigmaMutualWeightHelper(w, context[i], weights[i]), range(len(context))))
except BadMovie:
return 0
########################################################
########################################################
#Main functions:
def normalizedMutualInformationScorer(context, weights, world):
return map(lambda w:__normalizedSigmaMutualWeight(w, context, weights), world)
#------------- UNUSED EXTENSIONS --------------------
from search_m import searchExclusion, NGD
def __sigmaMutualWeight(w, context, weights):
return reduce(sum, map(lambda i:searchTogether(w, context[i])*weights[i], range(len(context))))
def basicScorer(context, weights, world):
#Note: uses searchExclusion, which is uncached...
return map(lambda w:__sigmaMutualWeight(w, context, weights)*1.0/(searchSingle(w)-searchExclusion(w, context)), world)
def NGD1Scorer(context, weights, world):
return 1.0/(reduce(sum, map(lambda i:NGD(world[i], context[i])*weights[i], range(len(context)))))
def NGD2Scorer(context, weights, world):
return reduce(sum, map(lambda i:1.0/(NGD(world[i], context[i])*weights[i]), range(len(context))))
def regularMutualInformationScorer(context, weights, world):
return map(lambda w:__sigmaMutualWeight(w, context, weights), world)
#-----------------------------------------
scoringAlgorithms = {"Basic": basicScorer, "NGD Type 1": NGD1Scorer, "NGD Type 2": NGD2Scorer,
"Regular Mutual Information": regularMutualInformationScorer,
"Normalized Mutual Information": normalizedMutualInformationScorer }
| [
[
8,
0,
0.0625,
0.1094,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1406,
0.0156,
0,
0.66,
0.0833,
784,
0,
2,
0,
0,
784,
0,
0
],
[
1,
0,
0.1562,
0.0156,
0,
0.66... | [
"'''\nCreated on 29/03/2011\n\n@author: Eran_Z\n\nScoring\n'''",
"from search_m import searchSingle, searchTogether",
"from util_m import sum, BadMovie",
"def __normalizedSigmaMutualWeightHelper(w, ci, wi):\n ss = searchSingle(w)\n st = searchTogether(w, ci)\n #return 0 if ss < st else st*wi*1.0/ss\n... |
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
try:
imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
sys.exit(1)
import settings
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1429,
0.0714,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
1,
0,
0.2143,
0.0714,
0,
0.66,
0.25,
201,
0,
1,
0,
0,
201,
0,
0
],
[
7,
0,
0.4643,
0.4286,
0,
0.... | [
"from django.core.management import execute_manager",
"import imp",
"try:\n imp.find_module('settings') # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized thi... |
from django.db import models
class Movie(models.Model):
title = models.CharField(max_length=200, primary_key=True)
year = models.PositiveSmallIntegerField()
searchPair = models.ManyToManyField('self', symmetrical=False, blank=True, through='SearchResult')
searchSingle = models.BigIntegerField()
link = models.CharField(max_length=256)
def __unicode__(self):
return '%s (%s)' % (self.title, self.year)
class SearchResult(models.Model):
movie1 = models.ForeignKey(Movie, related_name='searchresult_set1')
movie2 = models.ForeignKey(Movie, related_name='searchresult_set2')
numResults = models.BigIntegerField()
def __unicode__(self):
name1 = getattr(self.movie1, 'title')
name2 = getattr(self.movie2, 'title')
# can also use: getattr(self, 'movie1').__unicode__()
return '"%s" & "%s"' % (name1, name2)
| [
[
1,
0,
0.0476,
0.0476,
0,
0.66,
0,
40,
0,
1,
0,
0,
40,
0,
0
],
[
3,
0,
0.3095,
0.381,
0,
0.66,
0.5,
945,
0,
1,
0,
0,
996,
0,
5
],
[
14,
1,
0.1905,
0.0476,
1,
0.18,... | [
"from django.db import models",
"class Movie(models.Model):\n title = models.CharField(max_length=200, primary_key=True)\n year = models.PositiveSmallIntegerField()\n searchPair = models.ManyToManyField('self', symmetrical=False, blank=True, through='SearchResult')\n searchSingle = models.BigIntegerFi... |
"""
Test Movies module
"""
from django.test import TestCase
from RSM.my_friends.models import RSMUser, User
from RSM.my_friends.views import getUserNames
from RSM.my_movies.views import __updateUserMoviesList
from RSM.util import getRSMUser
from RSM.algorithm.search.makeCache import makeTestCache
getUserNames = getUserNames
updateUserMoviesList = __updateUserMoviesList
getRSMUser = getRSMUser
makeTestCache = makeTestCache
class MoviesTest(TestCase):
added = False
testCount = 1
def setUp(self):
print "\nMovies Test" + str(MoviesTest.testCount) + "\n"
if (MoviesTest.added):
return
#adding a user to the system
user = User.objects.create_user("Tomer", "t@t.com", "1234")
user.save()
RSMUser(user=user).save()
#add some movies to the system
makeTestCache()
def tearDown(self):
print "\nMovies Test" + str(MoviesTest.testCount) + "\n"
MoviesTest.testCount += 1
def test_1_addMovieToSeen(self):
#adding movies to the list
selectedList = ["Mov1","Mov2","Mov3"]
username = "Tomer"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
print "Added 3 movies to seen"
currentUser = getRSMUser(username)
#asserting addition
self.assertEquals(len(currentUser.seen.all()),3)
print "There are 3 movies in seen"
def test_2_addMovieToLikes(self):
#adding movies to the list
selectedList = ["Mov1","Mov2","Mov3"]
username = "Tomer"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
print "Added 3 movies to seen"
selectedList = ["Mov3","Mov1"]
username = "Tomer"
movieType = "Likes"
updateUserMoviesList(username,selectedList,movieType)
print "Added 2 movies to likes"
currentUser = getRSMUser(username)
#asserting addition
self.assertEquals(len(currentUser.likes.all()),2)
print "There are 2 movies in likes"
def test_3_addMovieToBlacklist(self):
#adding movies to the list
selectedList = ["Mov1","Mov2","Mov3"]
username = "Tomer"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
print "Added 3 movies to seen"
selectedList = ["Mov2"]
username = "Tomer"
movieType = "Blacklist"
updateUserMoviesList(username,selectedList,movieType)
print "Added 1 movie to blacklist"
currentUser = getRSMUser(username)
#asserting addition
self.assertEquals(len(currentUser.blacklist.all()),1)
print "There is 1 movie in blacklist"
def test_4_addUnSeenMovieToBlacklist(self):
#adding movies to the list
selectedList = ["Mov1","Mov2","Mov3"]
username = "Tomer"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
print "Added 3 movies to seen"
selectedList = ["Mov5"]
username = "Tomer"
movieType = "Blacklist"
updateUserMoviesList(username,selectedList,movieType)
print "Added 1 movie to blacklist, movie not in seen"
currentUser = getRSMUser(username)
#asserting addition
self.assertEquals(len(currentUser.blacklist.all()),1)
print "There is 1 movie in blacklist"
def test_5_addUnSeenMovieToLikes(self):
#adding movies to the list
selectedList = ["Mov1","Mov2","Mov3"]
username = "Tomer"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
print "Added 3 movies to seen"
selectedList = ["Mov5"]
username = "Tomer"
movieType = "Likes"
updateUserMoviesList(username,selectedList,movieType)
print "Added 1 movie to likes"
currentUser = getRSMUser(username)
#asserting addition
self.assertEquals(len(currentUser.likes.all()),1)
print "There is 1 movie in likes" | [
[
8,
0,
0.0172,
0.0259,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0431,
0.0086,
0,
0.66,
0.0909,
944,
0,
1,
0,
0,
944,
0,
0
],
[
1,
0,
0.0517,
0.0086,
0,
0.66... | [
"\"\"\"\nTest Movies module \n\"\"\"",
"from django.test import TestCase",
"from RSM.my_friends.models import RSMUser, User",
"from RSM.my_friends.views import getUserNames",
"from RSM.my_movies.views import __updateUserMoviesList",
"from RSM.util import getRSMUser",
"from RSM.algorithm.search.makeCache... |
'''
Created on Apr 29, 2011
@author: shurok
'''
from django.conf.urls.defaults import patterns
urlpatterns = patterns('my_movies.views',
(r'^viewMovieTypes/viewMovies/$','viewMovies'),
(r'^viewMovieTypes/$','viewMovieTypes'),
) | [
[
8,
0,
0.25,
0.4167,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.5833,
0.0833,
0,
0.66,
0.5,
341,
0,
1,
0,
0,
341,
0,
0
],
[
14,
0,
0.875,
0.3333,
0,
0.66,
... | [
"'''\nCreated on Apr 29, 2011\n\n@author: shurok\n'''",
"from django.conf.urls.defaults import patterns",
"urlpatterns = patterns('my_movies.views',\n (r'^viewMovieTypes/viewMovies/$','viewMovies'),\n (r'^viewMovieTypes/$','viewMovieTypes'),\n)"
] |
'''
Created on 21/04/2011
@author: Eran_Z
'''
from django.contrib import admin
from models import Movie, SearchResult
admin.site.register(Movie)
admin.site.register(SearchResult)
| [
[
8,
0,
0.2727,
0.4545,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6364,
0.0909,
0,
0.66,
0.25,
302,
0,
1,
0,
0,
302,
0,
0
],
[
1,
0,
0.7273,
0.0909,
0,
0.66,
... | [
"'''\nCreated on 21/04/2011\n\n@author: Eran_Z\n'''",
"from django.contrib import admin",
"from models import Movie, SearchResult",
"admin.site.register(Movie)",
"admin.site.register(SearchResult)"
] |
from django.template import RequestContext
from django.shortcuts import render_to_response
from models import Movie
from RSM.util import getRSMUser, verifySessionValidity, printDebug, getTitle
def viewMovieTypes(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
__updateUserMoviesList(request.user.username, request.POST.getlist(u'selectedList'), request.POST.get(u'movieType'))
return render_to_response("my_movies/movieTypes.html",context_instance=RequestContext(request))
def viewMovies(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
movieType = request.POST["movieType"]
movieNames = []
currentUser = getRSMUser(request.user.username)
allMovies = set(Movie.objects.all())
if movieType == "Seen":
movieNames = [u.title for u in currentUser.seen.all()]
elif movieType == "Likes":
movieNames = [u.title for u in currentUser.likes.all()]
allMovies -= set(currentUser.blacklist.all())
elif movieType == "Blacklist":
movieNames = [u.title for u in currentUser.blacklist.all()]
allMovies -= set(currentUser.likes.all())
else:
printDebug('BUG in my_movies view: viewMovies')
variables = {"movienames": movieNames, "movietype": movieType, "allmovies": map(getTitle, allMovies)}
return render_to_response("my_movies/viewmovies.html", variables, context_instance=RequestContext(request))
def __updateUserMoviesList(username, selectedList, movieType):
if not movieType:
return
currentUser = getRSMUser(username)
if movieType == "Seen":
list = currentUser.seen
elif movieType == "Likes":
list = currentUser.likes
elif movieType == "Blacklist":
list = currentUser.blacklist
else:
printDebug('BUG in my_movies view: __updateUserMoviesList')
list.clear()
for m in selectedList:
list.add(Movie.objects.get(title=m))
| [
[
1,
0,
0.0179,
0.0179,
0,
0.66,
0,
213,
0,
1,
0,
0,
213,
0,
0
],
[
1,
0,
0.0357,
0.0179,
0,
0.66,
0.1667,
852,
0,
1,
0,
0,
852,
0,
0
],
[
1,
0,
0.0536,
0.0179,
0,
... | [
"from django.template import RequestContext",
"from django.shortcuts import render_to_response",
"from models import Movie",
"from RSM.util import getRSMUser, verifySessionValidity, printDebug, getTitle",
"def viewMovieTypes(request):\n nonValidSessionFlag = verifySessionValidity(request)\n if nonVali... |
#from django.db import models
#from RSM.my_movies.models import *
#from RSM.my_friends.models import *
| [] | [] |
"""
Test recommendations module
"""
from django.test import TestCase
from RSM.my_friends.models import RSMUser, User
from RSM.my_friends.views import getUserNames
from RSM.my_movies.views import __updateUserMoviesList
from RSM.util import getRSMUser
from RSM.algorithm.search.makeCache import makeTestCache
getUserNames = getUserNames
updateUserMoviesList = __updateUserMoviesList
getRSMUser = getRSMUser
makeTestCache = makeTestCache
class RecommendationsTest(TestCase):
added = False
testCount = 1
def setUp(self):
print "\nRecommendations Test" + str(RecommendationsTest.testCount) + "\n"
if (RecommendationsTest.added):
return
#adding some users to the system
user1 = User.objects.create_user("Tomer", "t@t.com", "1234")
user1.save()
RSMUser(user=user1).save()
user2 = User.objects.create_user("Eran", "t@t.com", "1234")
user2.save()
RSMUser(user=user2).save()
user2 = User.objects.create_user("Alex", "t@t.com", "1234")
user2.save()
RSMUser(user=user2).save()
#add some movies to the system
makeTestCache()
#create test setup
#Setting movie data
selectedList = ["Mov1","Mov2"]
username = "Tomer"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
selectedList = ["Mov4"]
username = "Tomer"
movieType = "Blacklist"
updateUserMoviesList(username,selectedList,movieType)
selectedList = ["Mov2","Mov5"]
username = "Eran"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
selectedList = ["Mov5","Mov1"]
username = "Alex"
movieType = "Seen"
updateUserMoviesList(username,selectedList,movieType)
#setting friendships
self.client.login(password=u"1234",username="Tomer")
self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
#setting groups
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"addFriend","groupname":"testGroup1"})
def tearDown(self):
print "\nRecommendations Test" + str(RecommendationsTest.testCount) + "\n"
RecommendationsTest.testCount += 1
def test_1_basicRecommend(self):
print "basic recommend: ask for recommendation to self based only on seen & black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/")
self.assertContains(response, "Mov3")
self.assertContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies were recommended"
def test_2_recommendSeen(self):
print "recommend with seen movies: ask for recommendation to self based only on black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/",{"recommendSeen":"true"})
self.assertContains(response, "Mov3")
self.assertContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertContains(response, "Mov1")
self.assertContains(response, "Mov2")
print "Only non blacklisted movies were recommended"
def test_3_recommendWithFriends(self):
print "recommend with friends: ask for recommendation to self based on self & friends seen & black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/",{"friendList":("Alex","Eran")})
self.assertContains(response, "Mov3")
self.assertNotContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies were recommended considering friends movies as well"
def test_4_recommendWithGroups(self):
print "recommend with groups: ask for recommendation to self based on self & group seen & black list"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/",{"groupList":"testGroup1"})
self.assertContains(response, "Mov3")
self.assertNotContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies were recommended considering group movies as well"
def test_5_recommendWithGroupsVsFriends(self):
print "comparing group and friends results"
self.client.login(password=u"1234",username="Tomer")
response1 = self.client.post("/recommendation/recommend/",{"groupList":"testGroup1"})
response2 = self.client.post("/recommendation/recommend/",{"friendList":("Alex","Eran")})
self.assertEqual(str(response1),str(response2))
print "the results are equal"
def test_6_basicRecommend(self):
print "selected recommend: ask for recommendation to self based only on seen & black list & chosen movies"
self.client.login(password=u"1234",username="Tomer")
response = self.client.post("/recommendation/recommend/", {"selectedList":("Mov3","Mov2")})
self.assertContains(response, "Mov3")
self.assertNotContains(response, "Mov5")
self.assertNotContains(response, "Mov4")
self.assertNotContains(response, "Mov1")
self.assertNotContains(response, "Mov2")
print "Only unseen and non blacklisted movies from the list were recommended"
| [
[
8,
0,
0.0152,
0.0227,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0379,
0.0076,
0,
0.66,
0.0909,
944,
0,
1,
0,
0,
944,
0,
0
],
[
1,
0,
0.0455,
0.0076,
0,
0.66... | [
"\"\"\"\nTest recommendations module \n\"\"\"",
"from django.test import TestCase",
"from RSM.my_friends.models import RSMUser, User",
"from RSM.my_friends.views import getUserNames",
"from RSM.my_movies.views import __updateUserMoviesList",
"from RSM.util import getRSMUser",
"from RSM.algorithm.search.... |
'''
Created on 21/04/2011
@author: Eran_Z
'''
from django.conf.urls.defaults import patterns
urlpatterns = patterns('recommendation.views',
(r'^$', 'chooseFriends'),
(r'^recommend/$', 'recommend'),
) | [
[
8,
0,
0.2727,
0.4545,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.5455,
0.0909,
0,
0.66,
0.5,
341,
0,
1,
0,
0,
341,
0,
0
],
[
14,
0,
0.8636,
0.3636,
0,
0.66,
... | [
"'''\nCreated on 21/04/2011\n\n@author: Eran_Z\n'''",
"from django.conf.urls.defaults import patterns",
"urlpatterns = patterns('recommendation.views',\n (r'^$', 'chooseFriends'),\n (r'^recommend/$', 'recommend'),\n)"
] |
from django.template import RequestContext
from django.shortcuts import render_to_response
from RSM.algorithm.feasibility import COR_algorithm
from RSM.my_movies.models import Movie
from RSM.util import verifySessionValidity, getRSMUser, getRSMGroup, getTitle
recommendationLimit = 5
def recommend(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
currUser = getRSMUser(request.user.username)
recommendSeen = bool(request.POST.get(u'recommendSeen'))
limitWorld = bool(request.POST.get(u'limitWorld'))
if limitWorld:
#after clicking 'recommend' button with the 'limit world' checkbox checked
return __recommend_limitWorld(request, recommendSeen)
if request.POST.get(u'movieList'):
#redirected after updating likes, blacklist, etc.
return __recommend_afterUpdatingCheckboxes(request, currUser)
#first time in this page:
return __recommend_firstTime(request, currUser, recommendSeen)
def __recommend_limitWorld(request, recommendSeen):
return render_to_response('recommendation/chooseMovieSet.html', {'friendList': request.POST.getlist(u'friendList'), 'groupList': request.POST.getlist(u'groupList'), 'recommendSeen': recommendSeen, 'movies': map(getTitle, Movie.objects.all()) }, context_instance=RequestContext(request))
def __recommend_afterUpdatingCheckboxes(request, currUser):
movieList = request.POST.getlist(u'movieList')
new_seen = request.POST.getlist(u'seen')
new_like = request.POST.getlist(u'like')
new_blacklist = request.POST.getlist(u'blacklist')
__updateList(currUser.seen, new_seen, movieList)
__updateList(currUser.likes, new_like, movieList)
__updateList(currUser.blacklist, new_blacklist, movieList)
currUser.save()
context = set(map(getTitle, currUser.likes.all()))
seen = set(map(getTitle, currUser.seen.all()))
blacklist = set(map(getTitle, currUser.blacklist.all()))
movieResults = map(lambda r: Movie.objects.get(title=r), movieList)
return render_to_response('recommendation/results.html', {'results': [(m.title, m.year, m.link, (m.title in context), (m.title in seen), (m.title in blacklist))
for m in movieResults]}, context_instance=RequestContext(request))
def __recommend_firstTime(request, currUser, recommendSeen):
context = set(map(getTitle, currUser.likes.all()))
seen = set(map(getTitle, currUser.seen.all()))
blacklist = set(map(getTitle, currUser.blacklist.all()))
friends = set(map(lambda uname: getRSMUser(uname), request.POST.getlist(u'friendList')))
groups = map(lambda gn: getRSMGroup(currUser.user.username, gn), request.POST.getlist(u'groupList'))
for g in groups:
friends |= set(g.members.all())
friendsContext = context.copy()
if not recommendSeen:
friendsSeen = seen.copy()
friendsBlacklist = blacklist.copy()
for friend in friends:
friendsContext |= set(map(getTitle, friend.likes.all()))
if not recommendSeen:
friendsSeen |= set(map(getTitle, friend.seen.all()))
friendsBlacklist |= set(map(getTitle, friend.blacklist.all()))
world = __getWorld(friendsSeen if not recommendSeen else set(), friendsBlacklist)
if request.POST.get(u'selectedList'):
#after limiting world
world &= set(request.POST.getlist(u'selectedList'))
if not world:
return render_to_response('recommendation/noMatch.html', context_instance=RequestContext(request))
world = list(world)
if not friendsContext:
results = world[:recommendationLimit]
else:
results = COR_algorithm("Mutual Information", "Normalized Mutual Information", list(friendsContext), world)[:recommendationLimit]
movieResults = map(lambda r: Movie.objects.get(title=r), results)
return render_to_response('recommendation/results.html', {'results': [(m.title, m.year, m.link, (m.title in context), (m.title in seen), (m.title in blacklist))
for m in movieResults]}, context_instance=RequestContext(request))
def chooseFriends(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
currUser = getRSMUser(request.user.username)
friends = currUser.friends.all()
groups = currUser.groups.all()
return render_to_response('recommendation/chooseFriends.html', {'friends': friends, 'groups': groups}, context_instance=RequestContext(request))
#Private functions
def __getUnfilteredWorld():
return map(getTitle, Movie.objects.all())
def __getWorld(seen, blacklist):
return set(__getUnfilteredWorld()) - seen - blacklist
def __updateList(src, update, all):
for m in set(all):
try:
src.remove(Movie.objects.get(title=m))
except:
pass
for m in set(update):
try:
src.add(Movie.objects.get(title=m))
except:
pass
#src = (src.all() - set(all)) | set(update)
| [
[
1,
0,
0.0083,
0.0083,
0,
0.66,
0,
213,
0,
1,
0,
0,
213,
0,
0
],
[
1,
0,
0.0165,
0.0083,
0,
0.66,
0.0769,
852,
0,
1,
0,
0,
852,
0,
0
],
[
1,
0,
0.0248,
0.0083,
0,
... | [
"from django.template import RequestContext",
"from django.shortcuts import render_to_response",
"from RSM.algorithm.feasibility import COR_algorithm",
"from RSM.my_movies.models import Movie",
"from RSM.util import verifySessionValidity, getRSMUser, getRSMGroup, getTitle",
"recommendationLimit = 5",
"d... |
#!/usr/bin/env python
from django.core.management import execute_manager
import imp
try:
imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
sys.exit(1)
import settings
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1429,
0.0714,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
1,
0,
0.2143,
0.0714,
0,
0.66,
0.25,
201,
0,
1,
0,
0,
201,
0,
0
],
[
7,
0,
0.4643,
0.4286,
0,
0.... | [
"from django.core.management import execute_manager",
"import imp",
"try:\n imp.find_module('settings') # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized thi... |
from django.conf.urls.defaults import patterns, include, url
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf import settings
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^recommendation/', include('recommendation.urls')),
(r'^friends/', include('my_friends.urls')),
(r'^movies/', include('my_movies.urls')),
(r'^groups/', include('my_groups.urls')),
# Examples:
# url(r'^$', 'RSM.views.home', name='home'),
# url(r'^RSM/', include('RSM.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/', include(admin.site.urls)),
)
urlpatterns += staticfiles_urlpatterns()
# 'main' views
urlpatterns += patterns('RSM.views',
url(r'^$','index'),
url(r'^login/$','custom_login'),
url(r'^register/$','custom_register'),
url(r'^aboutUs/$','about_us'),
url(r'^profile/$','custom_profile'),
)
if settings.DEBUG:
# cache creation views
urlpatterns += patterns('',
(r'^resetCacheFiles/$', 'RSM.algorithm.search.makeCache.createBestMoviesFiles'),
(r'^resetCache/$', 'RSM.algorithm.search.makeCache.makeCache'),
)
| [
[
1,
0,
0.0244,
0.0244,
0,
0.66,
0,
341,
0,
3,
0,
0,
341,
0,
0
],
[
1,
0,
0.0488,
0.0244,
0,
0.66,
0.1667,
828,
0,
1,
0,
0,
828,
0,
0
],
[
1,
0,
0.0732,
0.0244,
0,
... | [
"from django.conf.urls.defaults import patterns, include, url",
"from django.contrib.staticfiles.urls import staticfiles_urlpatterns",
"from django.conf import settings",
"from django.contrib import admin",
"admin.autodiscover()",
"urlpatterns = patterns('',\n (r'^recommendation/', include('recommendat... |
# Django settings for RSM project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DEBUG_PRINTS = DEBUG
SITE_ROOT = os.path.dirname(os.path.realpath(__file__)).replace('\\','/')
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(SITE_ROOT, '../sqlite3.db'), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Asia/Jerusalem'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'templates'),
os.path.join(SITE_ROOT, 'templates/static'),
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'a=ec)ul(6pvifzv_h(o1h30we=b*0&+2z*48d@$qr5j)+$vdrm'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.csrf.CsrfResponseMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'RSM.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(SITE_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'RSM.recommendation',
'RSM.my_friends',
'RSM.my_movies',
'RSM.my_groups',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# 'doj', #deployment
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| [
[
1,
0,
0.0127,
0.0064,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
14,
0,
0.0255,
0.0064,
0,
0.66,
0.0385,
309,
1,
0,
0,
0,
0,
4,
0
],
[
14,
0,
0.0318,
0.0064,
0,
... | [
"import os",
"DEBUG = True",
"TEMPLATE_DEBUG = DEBUG",
"DEBUG_PRINTS = DEBUG",
"SITE_ROOT = os.path.dirname(os.path.realpath(__file__)).replace('\\\\','/')",
"ADMINS = (\n # ('Your Name', 'your_email@example.com'),\n)",
"MANAGERS = ADMINS",
"DATABASES = {\n 'default': {\n 'ENGINE': 'djang... |
from django.shortcuts import redirect, get_object_or_404
from my_friends.models import RSMUser
from my_groups.models import RSMGroup
import string
from django.conf import settings
def getRSMUser(username):
return get_object_or_404(RSMUser, user__username=username)
def getRSMGroup(username, groupname):
return get_object_or_404(RSMGroup, owner=username,groupName=groupname )
def getRSMGroup1(groupname):
return get_object_or_404(RSMGroup,groupName=groupname )
def getUserNames(rsmUsers):
return [u.user.username for u in rsmUsers]
def printDebug(str):
if settings.DEBUG_PRINTS:
print str
def verifySessionValidity(request):
if not request.user.is_authenticated():
return redirect('/?logout=true&reason=expired')
return None
def getExtend(request):
return "basic/homepage.html" if request.user.is_authenticated() else "basic/loggedOutMenu.html"
def getTitle(m):
return m.title
#These functions return True iff s is a string that we consider legal:
def isLegalName(s):
return __isAcceptableName(s, '"<>&')
def isSafeName(s):
return __isAcceptableName(s, '"\'<>&')
def __isAcceptableName(s, illegal_chars):
for c in s:
if c not in string.printable or c in illegal_chars:
return False
return True
| [
[
1,
0,
0.0217,
0.0217,
0,
0.66,
0,
852,
0,
2,
0,
0,
852,
0,
0
],
[
1,
0,
0.0435,
0.0217,
0,
0.66,
0.0667,
925,
0,
1,
0,
0,
925,
0,
0
],
[
1,
0,
0.0652,
0.0217,
0,
... | [
"from django.shortcuts import redirect, get_object_or_404",
"from my_friends.models import RSMUser",
"from my_groups.models import RSMGroup",
"import string",
"from django.conf import settings",
"def getRSMUser(username):\n return get_object_or_404(RSMUser, user__username=username)",
" return get_... |
from django.contrib import auth
from django.contrib.auth.models import User
from django.template import RequestContext
from django.db import IntegrityError
from django.shortcuts import render_to_response, get_object_or_404, redirect
from RSM.my_friends.models import RSMUser
from RSM.util import printDebug, getExtend, verifySessionValidity, isSafeName
def index(request):
if request.user.is_authenticated():
if request.GET.get("change"):
if request.POST:
if (request.POST.get("pass") == request.POST.get("repPass")):
request.user.set_password(request.POST.get("pass"))
request.user.save()
auth.logout(request)
return render_to_response("basic/welcomepage.html",context_instance=RequestContext(request))
else:
return render_to_response("basic/errorPage.html", {'extend': getExtend(request), 'message':reasons['passMismatch']}, context_instance=RequestContext(request))
else:
return redirect('/')
if request.GET.get("logout"):
auth.logout(request)
variables = {}
if request.GET.get("reason"):
reason = request.GET.get("reason")
if reasons.has_key(reason):
variables = {"reason": reasons[reason]}
return render_to_response('basic/welcomepage.html', variables, context_instance=RequestContext(request))
rsmUser = get_object_or_404(RSMUser, user__username=request.user.username)
return render_to_response('basic/homepage.html', {"user": rsmUser}, context_instance=RequestContext(request))
else:
return render_to_response('basic/welcomepage.html', context_instance=RequestContext(request))
def custom_login(request):
if request.POST:
action = request.POST.get('action')
printDebug(action)
if (action == 'Login'):
username = request.POST.get('username', '')
password = request.POST.get('password', '')
user = auth.authenticate(username=username, password=password)
if (user is not None and user.is_active):
auth.login(request, user)
rsmUser = get_object_or_404(RSMUser, user__username=request.user.username)
return render_to_response('basic/homepage.html', {"user": rsmUser}, context_instance=RequestContext(request))
else:
return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['uNamePassErr']},context_instance=RequestContext(request))
else:
return redirect('/')
else:
return redirect('/')
def about_us(request):
return render_to_response('basic/aboutUs.html', {'extend': getExtend(request)}, context_instance=RequestContext(request))
def custom_register(request):
if request.POST:
email = request.POST.get('email','')
password = request.POST.get('pass', '')
repeatPassword = request.POST.get('repPass', '')
username = request.POST.get('inputUsername','')
if not __isLegalUsername(username):
return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['illUName']}, context_instance=RequestContext(request))
if password != repeatPassword:
return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['passMismatch']}, context_instance=RequestContext(request))
try:
user = User.objects.create_user(username, email, password)
rsmUser = RSMUser()
rsmUser.user=user
rsmUser.save()
except IntegrityError:
return render_to_response("basic/errorPage.html", {'extend': getExtend(request), 'message': reasons['userExists']}, context_instance=RequestContext(request))
user = auth.authenticate(username=username, password=password)
if user is not None and user.is_active:
auth.login(request, user)
return render_to_response('basic/homepage.html', {"username": user}, context_instance=RequestContext(request))
else:
return render_to_response('basic/errorPage.html', {'extend': getExtend(request), 'message': reasons['expired']}, context_instance=RequestContext(request))
return render_to_response('basic/register.html', context_instance=RequestContext(request))
def custom_profile(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
rsmUser = get_object_or_404(RSMUser, user__username=request.user.username)
return render_to_response("basic/profile.html", {"user": rsmUser},context_instance=RequestContext(request))
def __isLegalUsername(username):
return username and isSafeName(username) and ' ' not in username and '%' not in username
reasons = {'expired': 'Your session has expired.', 'passMismatch': "Passwords don't match.", 'uNamePassErr': "Wrong username or password!", 'illUName': "Illegal username.", 'userExists': "Username is already taken."}
| [
[
1,
0,
0.0104,
0.0104,
0,
0.66,
0,
302,
0,
1,
0,
0,
302,
0,
0
],
[
1,
0,
0.0208,
0.0104,
0,
0.66,
0.0769,
808,
0,
1,
0,
0,
808,
0,
0
],
[
1,
0,
0.0312,
0.0104,
0,
... | [
"from django.contrib import auth",
"from django.contrib.auth.models import User",
"from django.template import RequestContext",
"from django.db import IntegrityError",
"from django.shortcuts import render_to_response, get_object_or_404, redirect",
"from RSM.my_friends.models import RSMUser",
"from RSM.u... |
from django.db import models
from django.contrib.auth.models import User
class RSMUser(models.Model):
user = models.OneToOneField(User)
friends = models.ManyToManyField('self', symmetrical=False, blank=True)
seen = models.ManyToManyField('my_movies.Movie', related_name='viewers', blank=True)
likes = models.ManyToManyField('my_movies.Movie', related_name='fans', blank=True)
blacklist = models.ManyToManyField('my_movies.Movie', related_name='haters', blank=True)
groups = models.ManyToManyField('my_groups.RSMGroup', related_name='groupps', blank=True)
def __unicode__(self):
return self.user.__str__()
| [
[
1,
0,
0.0833,
0.0833,
0,
0.66,
0,
40,
0,
1,
0,
0,
40,
0,
0
],
[
1,
0,
0.1667,
0.0833,
0,
0.66,
0.5,
808,
0,
1,
0,
0,
808,
0,
0
],
[
3,
0,
0.6667,
0.75,
0,
0.66,
... | [
"from django.db import models",
"from django.contrib.auth.models import User",
"class RSMUser(models.Model):\n user = models.OneToOneField(User)\n friends = models.ManyToManyField('self', symmetrical=False, blank=True)\n seen = models.ManyToManyField('my_movies.Movie', related_name='viewers', blank=Tru... |
"""
Test Friends module
"""
from django.test import TestCase
from models import RSMUser, User
#from django.template import RequestContext
from RSM.my_friends.views import getUserNames
getUserNames = getUserNames
class FriendsTest(TestCase):
added = False
testCount = 1
def setUp(self):
print "\nFriends Test" + str(FriendsTest.testCount) + "\n"
if (FriendsTest.added):
return
#adding some users to the system
user1 = User.objects.create_user("Tomer", "t@t.com", "1234")
user1.save()
RSMUser(user=user1).save()
user2 = User.objects.create_user("Eran", "t@t.com", "1234")
user2.save()
RSMUser(user=user2).save()
user3 = User.objects.create_user("Alex", "t@t.com", "1234")
user3.save()
RSMUser(user=user3).save()
# FriendsTest.added = True
def tearDown(self):
print "\nEnd Friends Test" + str(FriendsTest.testCount) + "\n"
FriendsTest.testCount += 1
def test_1_basic(self):
print "Adding users and checking initial state"
#Checking init of test
userNameList = getUserNames(RSMUser.objects.all())
#Asserting users
assert "Tomer" in userNameList
assert "Eran" in userNameList
assert "Alex" in userNameList
assert "BlaBla" not in userNameList
#Printing users
for u in userNameList:
print "Added RSMUser " + str(u) + " to the system"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.friends.all())
self.assertEquals(n,0)
#Test Finnished
print "Added users to the system, none have friends"
def test_2_addFriend(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.friends.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
print "Tomer has 1 Friend, No one else has friends"
def test_3_removeFriend(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/friends/remove/", {"friendsList":"Eran"})
print "Removed Eran from friends"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.friends.all())
self.assertEquals(n,0)
print "No one has friends"
def test_4_addMultipleFriends(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
print "Added Eran as friend"
print "Added Alex as friend"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.friends.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,2)
print "Tomer has 2 Friends, No one else has friends"
def test_5_removeMultipleFriends(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
print "Added Eran as friend"
print "Added Alex as friend"
self.client.post("/friends/remove/", {"friendsList":("Eran", "Alex")})
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.friends.all())
self.assertEquals(n,0)
print "Tomer has 2 Friends, No one else has friends" | [
[
8,
0,
0.0153,
0.0229,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0382,
0.0076,
0,
0.66,
0.2,
944,
0,
1,
0,
0,
944,
0,
0
],
[
1,
0,
0.0458,
0.0076,
0,
0.66,
... | [
"\"\"\"\nTest Friends module \n\"\"\"",
"from django.test import TestCase",
"from models import RSMUser, User",
"from RSM.my_friends.views import getUserNames",
"getUserNames = getUserNames",
"class FriendsTest(TestCase):\n added = False\n testCount = 1\n \n def setUp(self):\n print(\"\... |
from django.conf.urls.defaults import patterns
urlpatterns = patterns('my_friends.views',
(r'^add/$','addFriend'),
(r'^all/$','viewAllFriends'),
(r'^remove/$','removeFriend'),
(r'^activeAdd/$','actualAdd'),
) | [
[
1,
0,
0.125,
0.125,
0,
0.66,
0,
341,
0,
1,
0,
0,
341,
0,
0
],
[
14,
0,
0.6875,
0.75,
0,
0.66,
1,
990,
3,
5,
0,
0,
75,
10,
1
]
] | [
"from django.conf.urls.defaults import patterns",
"urlpatterns = patterns('my_friends.views',\n (r'^add/$','addFriend'),\n (r'^all/$','viewAllFriends'),\n (r'^remove/$','removeFriend'),\n (r'^activeAdd/$','actualAdd'),\n)"
] |
'''
Created on 21/04/2011
@author: Eran_Z
'''
from django.contrib import admin
from models import RSMUser
admin.site.register(RSMUser)
| [
[
8,
0,
0.3,
0.5,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7,
0.1,
0,
0.66,
0.3333,
302,
0,
1,
0,
0,
302,
0,
0
],
[
1,
0,
0.8,
0.1,
0,
0.66,
0.6667,
... | [
"'''\nCreated on 21/04/2011\n\n@author: Eran_Z\n'''",
"from django.contrib import admin",
"from models import RSMUser",
"admin.site.register(RSMUser)"
] |
from django.template import RequestContext
from django.shortcuts import render_to_response
from models import RSMUser
from RSM.util import verifySessionValidity, getUserNames, getRSMUser
def addFriend(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
li = getUserNames(RSMUser.objects.all())
li.remove(request.user.username)
for u in getUserNames(getRSMUser(request.user.username).friends.all()):
li.remove(u)
return render_to_response("my_friends/addfriend.html", {"users":li, "username": request.user.username}, context_instance=RequestContext(request))
def viewAllFriends(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
username = request.user.username
friendsNames = [u.user.username for u in getRSMUser(username).friends.all()]
hasToRemove = bool(len(friendsNames))
hasToAdd = len(RSMUser.objects.all()) != len(friendsNames)+1
return render_to_response("my_friends/viewfriends.html", {"friends": friendsNames, "hasToRemove": hasToRemove, "hasToAdd": hasToAdd}, context_instance=RequestContext(request))
def removeFriend(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
active_user = getRSMUser(request.user.username)
for o in request.POST.getlist(u'friendsList'):
friend = getRSMUser(o)
active_user.friends.remove(friend)
for gr in active_user.groups.all():
if friend in gr.members.all():
gr.members.remove(friend)
friendsNames = [u.user.username for u in getRSMUser(request.user.username).friends.all()]
hasToRemove = bool(len(friendsNames))
hasToAdd = len(RSMUser.objects.all()) != len(friendsNames)+1
return render_to_response("my_friends/viewfriends.html", {"friends": friendsNames, "hasToRemove": hasToRemove, "hasToAdd": hasToAdd}, context_instance=RequestContext(request))
def actualAdd(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
active_user = getRSMUser(request.user.username)
for o in request.POST.getlist(u'friendsToAdd'):
friend = getRSMUser(o)
active_user.friends.add(friend)
friendsNames = [u.user.username for u in getRSMUser(request.user.username).friends.all()]
hasToRemove = bool(len(friendsNames))
hasToAdd = len(RSMUser.objects.all()) != len(friendsNames)+1
return render_to_response("my_friends/viewfriends.html", {"friends": friendsNames, "hasToRemove": hasToRemove, "hasToAdd": hasToAdd}, context_instance=RequestContext(request))
| [
[
1,
0,
0.0172,
0.0172,
0,
0.66,
0,
213,
0,
1,
0,
0,
213,
0,
0
],
[
1,
0,
0.0345,
0.0172,
0,
0.66,
0.1429,
852,
0,
1,
0,
0,
852,
0,
0
],
[
1,
0,
0.0517,
0.0172,
0,
... | [
"from django.template import RequestContext",
"from django.shortcuts import render_to_response",
"from models import RSMUser",
"from RSM.util import verifySessionValidity, getUserNames, getRSMUser",
"def addFriend(request):\n nonValidSessionFlag = verifySessionValidity(request)\n if nonValidSessionFla... |
from django.db import models
class RSMGroup(models.Model):
owner = models.CharField(max_length=200)
groupName = models.CharField(max_length=200)
members = models.ManyToManyField('my_friends.RSMUser')
class Meta:
unique_together = ("owner", "groupName")
def __unicode__(self):
return self.groupName.__str__()
| [
[
1,
0,
0.0714,
0.0714,
0,
0.66,
0,
40,
0,
1,
0,
0,
40,
0,
0
],
[
3,
0,
0.6429,
0.7857,
0,
0.66,
1,
562,
0,
1,
0,
0,
996,
0,
4
],
[
14,
1,
0.3571,
0.0714,
1,
0.26,
... | [
"from django.db import models",
"class RSMGroup(models.Model):\n owner = models.CharField(max_length=200)\n groupName = models.CharField(max_length=200)\n \n members = models.ManyToManyField('my_friends.RSMUser')\n \n class Meta:\n unique_together = (\"owner\", \"groupName\")",
" own... |
"""
Test Groups module
"""
from django.test import TestCase
from RSM.my_friends.models import RSMUser, User
#from django.template import RequestContext
from RSM.my_friends.views import getUserNames
from RSM.util import getRSMGroup
getRSMGroup = getRSMGroup
getUserNames = getUserNames
class GroupsTest(TestCase):
added = False
testCount = 1
def setUp(self):
print "\nGroups Test" + str(GroupsTest.testCount) + "\n"
if (GroupsTest.added):
return
#adding some users to the system
user1 = User.objects.create_user("Tomer", "t@t.com", "1234")
user1.save()
RSMUser(user=user1).save()
user2 = User.objects.create_user("Eran", "t@t.com", "1234")
user2.save()
RSMUser(user=user2).save()
user3 = User.objects.create_user("Alex", "t@t.com", "1234")
user3.save()
RSMUser(user=user3).save()
def tearDown(self):
print "\nEnd Groups Test" + str(GroupsTest.testCount) + "\n"
GroupsTest.testCount += 1
def test_1_basic(self):
print "Adding users and checking initial state"
#Checking init of test
userNameList = getUserNames(RSMUser.objects.all())
#Asserting users
assert "Tomer" in userNameList
assert "Eran" in userNameList
assert "Alex" in userNameList
assert "BlaBla" not in userNameList
#Printing users
for u in userNameList:
print "Added RSMUser " + str(u) + " to the system"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n1 = len(u.friends.all())
n2 = len(u.groups.all())
self.assertEquals(n1,0)
self.assertEquals(n2,0)
#Test Finnished
print "Added users to the system, none have friends, none are members of groups"
def test_2_addGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
#Adding a new group
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
print "Tomer has 1 groups, No one else has groups"
def test_3_AddFriendToGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
print "added Eran to testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),1)
print "Tomer has one group that has one member"
def test_4_removeFriendFromGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
print "added Eran to testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"removeFriend","groupname":"testGroup1"})
print "removed Eran from testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),0)
print "Tomer has one group that has no members"
def test_5_addMultipleFriendsToGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
print "Added Eran & Alex as friends"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"addFriend","groupname":"testGroup1"})
print "added Eran & Alex to testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),2)
print "Tomer has one group that has two members"
def test_6_removeMultipleFriendsFromGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":("Eran","Alex")})
print "Added Eran & Alex as friends"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"addFriend","groupname":"testGroup1"})
print "added Eran & Alex to testGroup1"
self.client.post("/groups/editfriends/", {"friends":("Eran","Alex"),"action":"removeFriend","groupname":"testGroup1"})
print "removed Eran & Alex from testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),0)
print "Tomer has one group that has no members"
def test_7_removeFriend(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
print "added Eran to testGroup1"
self.client.post("/friends/remove/", {"friendsList":"Eran"})
print "Removed Eran from friends, should be removed from group"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),0)
print "Tomer has one group that has no members"
def test_8_removeGroup(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
#Adding a new group
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
#Removing a new group
self.client.post("/groups/remove/", {"groupname":"testGroup1"})
print "removed new group: testGroup1"
#Check no one has any friends
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
self.assertEquals(n,0)
print "Tomer has 0 groups, No one else has groups"
def test_9_removeGroupWithFriends(self):
userNameList = getUserNames(RSMUser.objects.all())
assert "Tomer" in userNameList
self.client.login(password=u"1234",username="Tomer")
print "Logged in as Tomer"
self.client.post("/friends/activeAdd/", {"friendsToAdd":"Eran"})
print "Added Eran as friend"
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
self.client.post("/groups/editfriends/", {"friends":"Eran","action":"addFriend","groupname":"testGroup1"})
print "added Eran to testGroup1"
#Removing a new group
self.client.post("/groups/remove/", {"groupname":"testGroup1"})
print "removed new group: testGroup1"
#Adding the group it should not include Eran
self.client.post("/groups/activeAdd/", {"groupname":"testGroup1"})
print "added new group: testGroup1"
#Check
users = RSMUser.objects.all()
for u in (users):
n = len(u.groups.all())
if (u.user.username != "Tomer"):
self.assertEquals(n,0)
else:
self.assertEquals(n,1)
group = getRSMGroup("Tomer","testGroup1")
self.assertEqual(len(group.members.all()),0)
print "Tomer has one group that has no members" | [
[
8,
0,
0.0076,
0.0114,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0189,
0.0038,
0,
0.66,
0.1429,
944,
0,
1,
0,
0,
944,
0,
0
],
[
1,
0,
0.0227,
0.0038,
0,
0.66... | [
"\"\"\"\nTest Groups module \n\"\"\"",
"from django.test import TestCase",
"from RSM.my_friends.models import RSMUser, User",
"from RSM.my_friends.views import getUserNames",
"from RSM.util import getRSMGroup",
"getRSMGroup = getRSMGroup",
"getUserNames = getUserNames",
"class GroupsTest(TestCase):\n ... |
'''
Created on May 26, 2011
@author: dima
'''
from django.conf.urls.defaults import patterns
urlpatterns = patterns('my_groups.views',
(r'^allGroups/$','allGroups'),
(r'^create/$','createGroup'),
(r'^remove/$','removeGroup'),
(r'^activeAdd/$','actualAdd'),
(r'^manage/$','editGroup'),
(r'^editfriends/$','editFrendsInGroup'),
) | [
[
8,
0,
0.2,
0.3333,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4,
0.0667,
0,
0.66,
0.5,
341,
0,
1,
0,
0,
341,
0,
0
],
[
14,
0,
0.7667,
0.5333,
0,
0.66,
1,... | [
"'''\nCreated on May 26, 2011\n\n@author: dima\n'''",
"from django.conf.urls.defaults import patterns",
"urlpatterns = patterns('my_groups.views',\n (r'^allGroups/$','allGroups'),\n (r'^create/$','createGroup'),\n (r'^remove/$','removeGroup'),\n (r'^activeAdd/$','actualAdd'),\n (r'^manage/$','edi... |
from django.template import RequestContext
from django.db import IntegrityError
from django.shortcuts import render_to_response, redirect
from models import RSMGroup
from RSM.util import verifySessionValidity, getRSMGroup, getUserNames, getRSMUser, isSafeName, getExtend
import hashlib
def allGroups(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
currentUser = getRSMUser(request.user.username)
groupsNames = [(u.groupName, hashlib.md5(u.groupName).hexdigest()) for u in currentUser.groups.all()]
return render_to_response("my_groups/viewgroups.html", {"groups": groupsNames, "username": request.user.username}, context_instance=RequestContext(request))
def createGroup(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
return render_to_response("my_groups/addgroup.html", {"username": request.user.username}, context_instance=RequestContext(request))
def actualAdd(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
currentUser = getRSMUser(request.user.username)
groupName = request.POST.get(u'groupname')
if not groupName or not isSafeName(groupName):
return render_to_response("basic/errorPage.html", {'extend': getExtend(request), 'message': "Illegal group name."}, context_instance=RequestContext(request))
try:
newGroup = RSMGroup(owner = request.user.username, groupName = groupName)
newGroup.save()
except IntegrityError:
return render_to_response("basic/errorPage.html", {'extend': getExtend(request), 'message': "Group with this name already exists."}, context_instance=RequestContext(request))
currentUser.groups.add(newGroup)
return redirect('/groups/allGroups/')
def removeGroup(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
current_user = getRSMUser(request.user.username)
groupName = request.POST.get(u'groupname')
group = getRSMGroup(request.user.username,groupName)
current_user.groups.remove(group)
group.delete()
return redirect('/groups/allGroups/')
def editGroup(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
groupName = request.POST.get(u'group')
group = getRSMGroup(request.user.username,groupName)
currentUser = getRSMUser(request.user.username)
friends_not_in_group = getUserNames(currentUser.friends.all())
for u in getUserNames(group.members.all()):
friends_not_in_group.remove(u)
hasToAdd = bool(len(friends_not_in_group))
friends_in_group = getUserNames(currentUser.friends.all())
for u in friends_not_in_group:
friends_in_group.remove(u)
hasToRemove = bool(len(friends_in_group))
variables = {"fgroups":friends_in_group, "fngroups":friends_not_in_group,"hasToRemove":hasToRemove, "hasToAdd":hasToAdd, "username":request.user.username, "group": groupName}
return render_to_response("my_groups/edit_group.html", variables, context_instance=RequestContext(request))
def editFrendsInGroup(request):
nonValidSessionFlag = verifySessionValidity(request)
if nonValidSessionFlag:
return nonValidSessionFlag
groupName = request.POST.get(u'groupname')
group = getRSMGroup(request.user.username,groupName)
shouldAdd = request.POST.get(u'action') == "addFriend"
for o in request.POST.getlist(u'friends'):
friend = getRSMUser(o)
if (shouldAdd):
group.members.add(friend)
else:
group.members.remove(friend)
currentUser = getRSMUser(request.user.username)
friends_not_in_group = getUserNames(currentUser.friends.all())
for u in getUserNames(group.members.all()):
friends_not_in_group.remove(u)
hasToAdd = bool(len(friends_not_in_group))
friends_in_group = getUserNames(currentUser.friends.all())
for u in friends_not_in_group:
friends_in_group.remove(u)
hasToRemove = bool(len(friends_in_group))
variables = {"fgroups":friends_in_group, "fngroups":friends_not_in_group,"hasToRemove":hasToRemove, "hasToAdd":hasToAdd, "username":request.user.username, "group": groupName}
return render_to_response("my_groups/edit_group.html", variables, context_instance=RequestContext(request))
| [
[
1,
0,
0.01,
0.01,
0,
0.66,
0,
213,
0,
1,
0,
0,
213,
0,
0
],
[
1,
0,
0.02,
0.01,
0,
0.66,
0.0909,
40,
0,
1,
0,
0,
40,
0,
0
],
[
1,
0,
0.03,
0.01,
0,
0.66,
0.18... | [
"from django.template import RequestContext",
"from django.db import IntegrityError",
"from django.shortcuts import render_to_response, redirect",
"from models import RSMGroup",
"from RSM.util import verifySessionValidity, getRSMGroup, getUserNames, getRSMUser, isSafeName, getExtend",
"import hashlib",
... |
#!/usr/bin/env pypy
import os, sys, logging, re
import argparse
import fnmatch
configurations = {'lite', 'pro'}
package_dirs = {
'lite': ('src/cx/hell/android/pdfview',),
'pro': ('src/cx/hell/android/pdfviewpro',)
}
file_replaces = {
'lite': (
'cx.hell.android.pdfview.',
'"cx.hell.android.pdfview"',
'package cx.hell.android.pdfview;',
'android:icon="@drawable/pdfviewer"',
),
'pro': (
'cx.hell.android.pdfviewpro.',
'"cx.hell.android.pdfviewpro"',
'package cx.hell.android.pdfviewpro;',
'android:icon="@drawable/apvpro_icon"',
),
}
def make_comment(file_type, line):
"""Add comment to line and return modified line, but try not to add comments to already commented out lines."""
if file_type in ('java', 'c'):
return '// ' + line if not line.startswith('//') else line
elif file_type in ('html', 'xml'):
return '<!-- ' + line.strip() + ' -->\n' if not line.strip().startswith('<!--') else line
else:
raise Exception("unknown file type: %s" % file_type)
def remove_comment(file_type, line):
"""Remove comment from line, but only if line is commented, otherwise return unchanged line."""
if file_type in ('java', 'c'):
if line.startswith('// '): return line[3:]
else: return line
elif file_type in ('html', 'xml'):
if line.strip().startswith('<!-- ') and line.strip().endswith(' -->'): return line.strip()[5:-4] + '\n'
else: return line
else:
raise Exception("unknown file type: %s" % file_type)
def handle_comments(conf, file_type, lines, filename):
new_lines = []
re_cmd_starts = re.compile(r'(?:(//|<!--))\s+#ifdef\s+(?P<def>[a-zA-Z]+)')
re_cmd_ends = re.compile(r'(?:(//|<!--))\s+#endif')
required_defs = []
for i, line in enumerate(lines):
m = re_cmd_starts.search(line)
if m:
required_def = m.group('def')
logging.debug("line %s:%d %s matches as start of %s" % (filename, i+1, line.strip(), required_def))
required_defs.append(required_def)
new_lines.append(line)
continue
m = re_cmd_ends.search(line)
if m:
logging.debug("line %s:%d %s matches as endif" % (filename, i+1, line.strip()))
required_defs.pop()
new_lines.append(line)
continue
if len(required_defs) == 0:
new_lines.append(line)
elif len(required_defs) == 1 and required_defs[0] == conf:
new_line = remove_comment(file_type, line)
new_lines.append(new_line)
else:
new_line = make_comment(file_type, line)
new_lines.append(new_line)
assert len(new_lines) == len(lines)
return new_lines
def find_files(dirname, name):
matches = []
for root, dirnames, filenames in os.walk(dirname):
for filename in fnmatch.filter(filenames, name):
matches.append(os.path.join(root, filename))
return matches
def fix_package_dirs(conf):
for i, dirname in enumerate(package_dirs[conf]):
logging.debug("trying to restore %s" % dirname)
if os.path.exists(dirname):
if os.path.isdir(dirname):
logging.debug(" already exists")
continue
else:
logging.error(" %s already exists, but is not dir" % dirname)
continue
# find other name
found_dirname = None
for other_conf, other_dirnames in package_dirs.items():
other_dirname = other_dirnames[i]
if other_conf == conf: continue # skip this conf when looking for other conf
if os.path.isdir(other_dirname):
if found_dirname is None:
found_dirname = other_dirname
else:
# source dir already found :/
raise Exception("too many possible dirs for this package: %s, %s" % (found_dirname, other_dirname))
if found_dirname is None:
raise Exception("didn't find %s" % dirname)
# now rename found_dirname to dirname
os.rename(found_dirname, dirname)
logging.debug("renamed %s to %s" % (found_dirname, dirname))
def handle_comments_in_files(conf, file_type, filenames):
for filename in filenames:
lines = open(filename).readlines()
new_lines = handle_comments(conf, file_type, lines, filename)
if lines != new_lines:
logging.debug("file %s comments changed" % filename)
f = open(filename, 'w')
f.write(''.join(new_lines))
f.close()
del f
def replace_in_files(conf, filenames):
#logging.debug("about replace to %s in %s" % (conf, ', '.join(filenames)))
other_confs = [other_conf for other_conf in file_replaces.keys() if other_conf != conf]
#logging.debug("there are %d other confs to replace from: %s" % (len(other_confs), ', '.join(other_confs)))
for filename in filenames:
new_lines = []
lines = open(filename).readlines()
for line in lines:
new_line = line
for i, target_string in enumerate(file_replaces[conf]):
for other_conf in other_confs:
source_string = file_replaces[other_conf][i]
new_line = new_line.replace(source_string, target_string)
new_lines.append(new_line)
if new_lines != lines:
logging.debug("file %s changed, writing..." % filename)
f = open(filename, 'w')
f.write(''.join(new_lines))
f.close()
del f
else:
logging.debug("file %s didn't change, no need to rewrite" % filename)
def fix_java_files(conf):
filenames = find_files('src', name='*.java')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'java', filenames)
def fix_xml_files(conf):
filenames = find_files('.', name='*.xml')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'xml', filenames)
def fix_html_files(conf):
filenames = find_files('res', name='*.html')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'html', filenames)
def fix_c_files(conf):
filenames = find_files('jni/pdfview2', name='*.c')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'c', filenames)
filenames = find_files('jni/pdfview2', name='*.h')
replace_in_files(conf, filenames)
handle_comments_in_files(conf, 'c', filenames)
def fix_resources(conf):
pass
def main():
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s')
parser = argparse.ArgumentParser(description='Switch project configurations')
parser.add_argument('--configuration', dest='configuration', default='lite')
args = parser.parse_args()
if not os.path.exists('AndroidManifest.xml'):
raise Exception('android manifest not found, please run this script from main project directory')
conf = args.configuration
if conf not in configurations:
raise Exception("invalid configuration: %s" % conf)
fix_package_dirs(conf)
fix_java_files(conf)
fix_xml_files(conf)
fix_html_files(conf)
fix_c_files(conf)
fix_resources(conf)
if __name__ == '__main__':
main()
| [
[
1,
0,
0.014,
0.0047,
0,
0.66,
0,
688,
0,
4,
0,
0,
688,
0,
0
],
[
1,
0,
0.0187,
0.0047,
0,
0.66,
0.0526,
325,
0,
1,
0,
0,
325,
0,
0
],
[
1,
0,
0.0234,
0.0047,
0,
0... | [
"import os, sys, logging, re",
"import argparse",
"import fnmatch",
"configurations = {'lite', 'pro'}",
"package_dirs = {\n 'lite': ('src/cx/hell/android/pdfview',),\n 'pro': ('src/cx/hell/android/pdfviewpro',)\n}",
"file_replaces = {\n 'lite': (\n 'cx.hell.android.pdfview.',\n '\"c... |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Handle playfoursquare.com requests, for testing."""
def do_GET(self):
logging.warn('do_GET: %s, %s', self.command, self.path)
url = urlparse.urlparse(self.path)
logging.warn('do_GET: %s', url)
query = urlparse.parse_qs(url.query)
query_keys = [pair[0] for pair in query]
response = self.handle_url(url)
if response != None:
self.send_200()
shutil.copyfileobj(response, self.wfile)
self.wfile.close()
do_POST = do_GET
def handle_url(self, url):
path = None
if url.path == '/v1/venue':
path = '../captures/api/v1/venue.xml'
elif url.path == '/v1/addvenue':
path = '../captures/api/v1/venue.xml'
elif url.path == '/v1/venues':
path = '../captures/api/v1/venues.xml'
elif url.path == '/v1/user':
path = '../captures/api/v1/user.xml'
elif url.path == '/v1/checkcity':
path = '../captures/api/v1/checkcity.xml'
elif url.path == '/v1/checkins':
path = '../captures/api/v1/checkins.xml'
elif url.path == '/v1/cities':
path = '../captures/api/v1/cities.xml'
elif url.path == '/v1/switchcity':
path = '../captures/api/v1/switchcity.xml'
elif url.path == '/v1/tips':
path = '../captures/api/v1/tips.xml'
elif url.path == '/v1/checkin':
path = '../captures/api/v1/checkin.xml'
elif url.path == '/history/12345.rss':
path = '../captures/api/v1/feed.xml'
if path is None:
self.send_error(404)
else:
logging.warn('Using: %s' % path)
return open(path)
def send_200(self):
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| [
[
1,
0,
0.0588,
0.0118,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0706,
0.0118,
0,
0.66,
0.125,
614,
0,
1,
0,
0,
614,
0,
0
],
[
1,
0,
0.0824,
0.0118,
0,
0... | [
"import logging",
"import shutil",
"import sys",
"import urlparse",
"import SimpleHTTPServer",
"import BaseHTTPServer",
"class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"Handle playfoursquare.com requests, for testing.\"\"\"\n\n def do_GET(self):\n logging.warn('do_GET: %s, %s',... |
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
captures = os.listdir(TYPESDIR)
for f in captures:
basename = f.split('.')[0]
javaname = ''.join([c.capitalize() for c in basename.split('_')])
fullpath = os.path.join(TYPESDIR, f)
typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
| [
[
1,
0,
0.1111,
0.037,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.1481,
0.037,
0,
0.66,
0.1429,
394,
0,
1,
0,
0,
394,
0,
0
],
[
1,
0,
0.1852,
0.037,
0,
0.6... | [
"import os",
"import subprocess",
"import sys",
"BASEDIR = '../main/src/com/joelapenna/foursquare'",
"TYPESDIR = '../captures/types/v1'",
"captures = sys.argv[1:]",
"if not captures:\n captures = os.listdir(TYPESDIR)",
" captures = os.listdir(TYPESDIR)",
"for f in captures:\n basename = f.split('... |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
SERVER = 'api.foursquare.com:80'
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
return (
re.search('<oauth_token>(.*)</oauth_token>', auth_response).groups()[0],
re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
auth_response).groups()[0]
)
def create_signed_oauth_request(username, password, consumer):
oauth_request = oauth.OAuthRequest.from_consumer_and_token(
consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
parameters=dict(fs_username=username, fs_password=password))
oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
return oauth_request
def main():
url = urlparse.urlparse(sys.argv[1])
# Nevermind that the query can have repeated keys.
parameters = dict(urlparse.parse_qsl(url.query))
password_file = open(os.path.join(user.home, '.oget'))
lines = [line.strip() for line in password_file.readlines()]
if len(lines) == 4:
cons_key, cons_key_secret, username, password = lines
access_token = None
else:
cons_key, cons_key_secret, username, password, token, secret = lines
access_token = oauth.OAuthToken(token, secret)
consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
if not access_token:
oauth_request = create_signed_oauth_request(username, password, consumer)
connection = httplib.HTTPConnection(SERVER)
headers = {'Content-Type' :'application/x-www-form-urlencoded'}
connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
body=oauth_request.to_postdata(), headers=headers)
auth_response = connection.getresponse().read()
token = parse_auth_response(auth_response)
access_token = oauth.OAuthToken(*token)
open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
cons_key, cons_key_secret, username, password, token[0], token[1])))
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
access_token, http_method='POST', http_url=url.geturl(),
parameters=parameters)
oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
connection = httplib.HTTPConnection(SERVER)
connection.request(oauth_request.http_method, oauth_request.to_url(),
body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
print connection.getresponse().read()
#print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
main()
| [
[
8,
0,
0.0631,
0.0991,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1261,
0.009,
0,
0.66,
0.05,
2,
0,
1,
0,
0,
2,
0,
0
],
[
1,
0,
0.1351,
0.009,
0,
0.66,
0.... | [
"\"\"\"\nPull a oAuth protected page from foursquare.\n\nExpects ~/.oget to contain (one on each line):\nCONSUMER_KEY\nCONSUMER_KEY_SECRET\nUSERNAME\nPASSWORD",
"import httplib",
"import os",
"import re",
"import sys",
"import urllib",
"import urllib2",
"import urlparse",
"import user",
"from xml.... |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
# CameCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
# camelCaseLocalName
attribute_name = camel_name.lower().capitalize()
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
if __name__ == '__main__':
main()
| [
[
1,
0,
0.0201,
0.0067,
0,
0.66,
0,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.0268,
0.0067,
0,
0.66,
0.0769,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0336,
0.0067,
0,
... | [
"import datetime",
"import sys",
"import textwrap",
"import common",
"from xml.dom import pulldom",
"PARSER = \"\"\"\\\n/**\n * Copyright 2009 Joe LaPenna\n */\n\npackage com.joelapenna.foursquare.parsers;\n\nimport com.joelapenna.foursquare.Foursquare;",
"BOOLEAN_STANZA = \"\"\"\\\n } else i... |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"
# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']
# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}
DEFAULT_CLASS_IMPORTS = [
]
CLASS_IMPORTS = {
# 'Checkin': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
# 'Venue': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
# 'Tip': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
}
COMPLEX = [
'Group',
'Badge',
'Beenhere',
'Checkin',
'CheckinResponse',
'City',
'Credentials',
'Data',
'Mayor',
'Rank',
'Score',
'Scoring',
'Settings',
'Stats',
'Tags',
'Tip',
'User',
'Venue',
]
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
"""Parse the xml file getting all attributes.
<venue>
<attribute>value</attribute>
</venue>
Returns:
type_name - The java-style name the top node will have. "Venue"
top_node_name - unadultured name of the xml stanza, probably the type of
java class we're creating. "venue"
attributes - {'attribute': 'value'}
"""
doc = pulldom.parse(path)
type_name = None
top_node_name = None
attributes = {}
level = 0
for event, node in doc:
# For skipping parts of a tree.
if level > 0:
if event == pulldom.END_ELEMENT:
level-=1
logging.warn('(%s) Skip end: %s' % (str(level), node))
continue
elif event == pulldom.START_ELEMENT:
logging.warn('(%s) Skipping: %s' % (str(level), node))
level+=1
continue
if event == pulldom.START_ELEMENT:
logging.warn('Parsing: ' + node.tagName)
# Get the type name to use.
if type_name is None:
type_name = ''.join([word.capitalize()
for word in node.tagName.split('_')])
top_node_name = node.tagName
logging.warn('Found Top Node Name: ' + top_node_name)
continue
typ = node.getAttribute('type')
child = node.getAttribute('child')
# We don't want to walk complex types.
if typ in COMPLEX:
logging.warn('Found Complex: ' + node.tagName)
level = 1
elif typ not in TYPES:
logging.warn('Found String: ' + typ)
typ = STRING
else:
logging.warn('Found Type: ' + typ)
logging.warn('Adding: ' + str((node, typ)))
attributes.setdefault(node.tagName, (typ, [child]))
logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
return type_name, top_node_name, attributes
| [
[
1,
0,
0.0263,
0.0088,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0439,
0.0088,
0,
0.66,
0.0833,
290,
0,
1,
0,
0,
290,
0,
0
],
[
1,
0,
0.0526,
0.0088,
0,
... | [
"import logging",
"from xml.dom import minidom",
"from xml.dom import pulldom",
"BOOLEAN = \"boolean\"",
"STRING = \"String\"",
"GROUP = \"Group\"",
"DEFAULT_INTERFACES = ['FoursquareType']",
"INTERFACES = {\n}",
"DEFAULT_CLASS_IMPORTS = [\n]",
"CLASS_IMPORTS = {\n# 'Checkin': DEFAULT_CLASS_IMP... |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
# for binaries NOT on your PATH, you should specify the complete path here,
# e.g. SVN = '/usr/bin/svn'. For binaries ON your path, only the binary name
# e.g. SVN = 'svn'
SVN = 'svn'
HG = 'hg'
CVS = 'cvs -z3'
GIT = 'git'
PATCH = 'patch'
# only required on Windows
DEVENV = 'devenv'
# on windows, cmake should be on your path, or you should specify the
# full path here. On *ix, you don't have to touch this (johannes
# builds and configures its own cmake)
CMAKE_BINPATH = 'cmake'
# set to True if you want to use distcc on *ix, False otherwise
HAVE_DISTCC = False
# on *ix, use this many parallel make processes
# if you're using distcc, this should be even higher.
NUM_MAKE_PROCESSES = 4
# Set to True if you want to build redistributable DeVIDE binaries
# with PyInstaller as part of the johannes build process. If False,
# you can still run DeVIDE directly from its build directory, and you
# can also create redistributable binaries at a later stage.
BUILD_DEVIDE_DISTRIBUTABLES = False
# nothing for you to edit below this line
#######################################################################
import os
import sys
# this is manually updated by the DeVIDE developers to indicate
# which changeset of DeVIDE this johannes changeset is able to build
DEVIDE_CHANGESET_ID = "5bd1581ebcab"
# this should be the date of the above changeset ID
# and probably the new-style DeVIDE versioning
# so for release: DeVIDE v11.9.16
# for dev: DeVIDE vDEV11.9.16
DEVIDE_DATESTR = "12.4.18"
# contains fixes for dre_top being duplicated, hence
# breaking the pythonhome!
DRE_CHANGESET_ID = "34c8b63b2ac9"
VTKDEVIDE_CHANGESET_ID = "bdc8e1f7e6e6"
BUILD_TARGET = 'RelWithDebInfo'
# will be filled in by init()
JOHANNES_REVISION_ID = "NOT SET"
# the following variables are written by various InstallPackages
####################################################################
# will be written by init()
MAKE = ''
SO_EXT = ''
PYE_EXT = ''
EXE_EXT = ''
WINARCH = ''
WINARCH_STR = ''
# together with CMAKE_BIN_PATH, these will be used by the utils
# modules to build up a cmake command.
CMAKE_DEFAULT_PARAMS = '' # this will be set by init()
CMAKE_PRE_VARS = ''
DCMTK_INCLUDE = ''
DCMTK_LIB = ''
VTK_DIR = ''
VTK_LIB = ''
VTK_SODIR = ''
VTK_PYTHON = ''
ITK_LIB = ''
ITK_BIN = ''
ITK_PYTHON = ''
GDCM_LIB = ''
GDCM_PYTHON = ''
VTKDEVIDE_LIB = ''
VTKDEVIDE_PYTHON = ''
VTKTUDOSS_LIB = ''
VTKTUDOSS_PYTHON =''
WX_LIB_PATH = ''
WXP_PYTHONPATH = ''
ITK_DIR = ''
ITK_BIN = ''
WRAPITK_LIB = ''
WRAPITK_PYTHON = ''
DEVIDE_PY = ''
PYTHON_EXECUTABLE = ''
PYTHON_INCLUDE_PATH = ''
PYTHON_LIBRARY = ''
DEVIDE_INST_DIR = ''
#######################################################################
# UTILITY method (also available in utils.py which we don't want to import)
def get_status_output(command):
"""Run command, return output of command and exit code in status.
In general, status is None for success and 1 for command not
found.
"""
ph = os.popen(command)
output = ph.read()
status = ph.close()
return (status, output)
def init(wd, the_profile):
global working_dir, archive_dir, build_dir, inst_dir
working_dir = os.path.abspath(wd)
archive_dir = os.path.join(working_dir, 'archive')
build_dir = os.path.join(working_dir, 'build')
inst_dir = os.path.join(working_dir, 'inst')
# we will also need directory where johannes finds itself, in
# order to retrieve patches.
global johannes_dir, patches_dir, ip_dir
johannes_dir = os.path.dirname(__file__)
patches_dir = os.path.join(johannes_dir, 'patches')
ip_dir = os.path.join(johannes_dir, 'install_packages')
# get revision ID
global JOHANNES_REVISION_ID
status, output = get_status_output("%s id %s" % (HG, johannes_dir))
# strip is in case we have single token to get rid of \n
JOHANNES_REVISION_ID = output.split(' ')[0].strip()
global profile
profile = the_profile
global python_library_path, python_binary_path, python_scripts_path
python_library_path = os.path.join(inst_dir, 'python', 'lib')
if os.name == 'nt':
python_binary_path = os.path.join(inst_dir, 'python')
else:
python_binary_path = os.path.join(inst_dir, 'python', 'bin')
python_scripts_path = os.path.join(inst_dir, 'python', 'Scripts')
# platform dependent stuff =========================================
# use conditionals based on os.name (posix, nt) and sys.platform (linux2,
# win32)
global MAKE, DEVENV, CMAKE_DEFAULT_PARAMS, CMAKE_PRE_VARS
global SO_EXT, PYE_EXT, EXE_EXT
# FIXME: change convention to x86, amd64, ia64 instead of 32bit and 64bit.
# Go through all user code to fix.
global WINARCH, WINARCH_STR
if os.name == 'posix':
CMAKE_DEFAULT_PARAMS = '-G "Unix Makefiles"'
MAKE = 'make -j%d' % (NUM_MAKE_PROCESSES,)
if HAVE_DISTCC:
CMAKE_PRE_VARS = 'CC="distcc cc" CXX="distcc c++"'
else:
CMAKE_PRE_VARS = ''
SO_EXT = '.so'
PYE_EXT = SO_EXT
elif os.name == 'nt':
import platform
a = platform.architecture()[0]
if a == '32bit':
CMAKE_DEFAULT_PARAMS = '-G "Visual Studio 9 2008"'
# where the %s substitution is the SLN file
# important that devenv is run, and NOT devenv.exe!
MAKE = DEVENV + ' %s /project %s ' \
'/projectconfig "%s|Win32" /build %s'
WINARCH = '32bit'
WINARCH_STR = 'x86'
else:
CMAKE_DEFAULT_PARAMS = '-G "Visual Studio 9 2008 Win64"'
# where the %s substitution is the SLN file
# important that devenv is run, and NOT devenv.exe!
MAKE = DEVENV + ' %s /project %s ' \
'/projectconfig "%s|x64" /build %s'
WINARCH = '64bit'
WINARCH_STR = 'x64'
SO_EXT = '.dll'
PYE_EXT = '.pyd'
EXE_EXT = '.exe'
# now setup some python stuff
global PYTHON_EXECUTABLE
global PYTHON_INCLUDE_PATH
global PYTHON_LIBRARY
global PYTHON_SITE_PACKAGES
from distutils import sysconfig
PYTHON_EXECUTABLE = sys.executable
PYTHON_INCLUDE_PATH = sysconfig.get_python_inc()
PYTHON_SITE_PACKAGES = sysconfig.get_python_lib()
# PYTHON_LIBRARY:
if os.name == 'posix':
# under linux, we want the location of libpython2.5.so, under a
# self-built installation, that's python-inst/lib/libpython2.5.so
# system installation is /usr/lib/libpython2.5.so
ldl = sysconfig.get_config_var('LDLIBRARY') # gives the SO name
ll = os.path.join(sysconfig.get_config_var('prefix'), 'lib')
PYTHON_LIBRARY = os.path.join(ll, ldl)
elif os.name == 'nt':
# under windows, we want Python25\libs\python25.lib (the link
# stub for the DLL)
# first derive python25.lib
ldl = 'python%s%s.lib' % \
tuple(sysconfig.get_python_version().split('.'))
# then figure out python25\libs
ll = os.path.join(sysconfig.get_config_var('prefix'), 'libs')
PYTHON_LIBRARY = os.path.join(ll, ldl)
if not os.path.exists(PYTHON_LIBRARY):
raise RuntimeError(
'!!!!! %s does not exist (python-dev installed?).' %
(PYTHON_LIBRARY,))
| [
[
14,
0,
0.0403,
0.004,
0,
0.66,
0,
417,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0444,
0.004,
0,
0.66,
0.0189,
4,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0484,
0.004,
0,
0.66,
... | [
"SVN = 'svn'",
"HG = 'hg'",
"CVS = 'cvs -z3'",
"GIT = 'git'",
"PATCH = 'patch'",
"DEVENV = 'devenv'",
"CMAKE_BINPATH = 'cmake'",
"HAVE_DISTCC = False",
"NUM_MAKE_PROCESSES = 4",
"BUILD_DEVIDE_DISTRIBUTABLES = False",
"import os",
"import sys",
"DEVIDE_CHANGESET_ID = \"5bd1581ebcab\"",
"DEV... |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import utils
import os
import shutil
import types
class InstallPackage:
"""All libraries that should be installed by johannes have to have
InstallPackage abstractions. This class defines which actions need to
be taken to get, configure, build and install a complete library /
software package.
"""
def get(self):
pass
def unpack(self):
pass
def configure(self):
pass
def build(self):
pass
def install(self):
pass
def clean_build(self):
"""This method should clean up in such a way that the next build
of this package will result in AT LEAST all steps from configure
and onwards. By default, it removes the build dir and calls
clean_install().
"""
utils.output("Removing build and installation directories.")
if os.path.exists(self.build_dir):
shutil.rmtree(self.build_dir)
self.clean_install()
def clean_install(self):
""" Only cleans up the install directory.
"""
utils.output("Removing installation directory.")
if os.path.exists(self.inst_dir):
shutil.rmtree(self.inst_dir)
def list(self):
""" Lists the methods of this install package.
(Sometimes I forget what the exact names are)
"""
atts = dir(self)
for att in atts:
if type(getattr(self, att)) == types.MethodType:
utils.output(att)
| [
[
1,
0,
0.0847,
0.0169,
0,
0.66,
0,
970,
0,
1,
0,
0,
970,
0,
0
],
[
1,
0,
0.1017,
0.0169,
0,
0.66,
0.25,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.1186,
0.0169,
0,
0.... | [
"import utils",
"import os",
"import shutil",
"import types",
"class InstallPackage:\n\n \"\"\"All libraries that should be installed by johannes have to have\n InstallPackage abstractions. This class defines which actions need to\n be taken to get, configure, build and install a complete library ... |
# Copyright (c) Charl P. Botha, TU Delft.
# All rights reserved.
# See COPYRIGHT for details.
import config
import glob
import os
import re
import sys, urllib
import shutil
import tarfile
import zipfile
import subprocess
def cmake_command(build_dir, source_dir, cmake_params):
"""Invoke correct cmake commands to configure a build directory.
@param build_dir: out-of-source build directory. method will
chdir there before invoking cmake
@param source_dir: location of the source that will be built
@cmake_params: string of "-Dparam=blaat -Dparam2=blaat" specifying
cmake parameters
"""
# first create correct cmake invocation
cmake = '%s %s' % (config.CMAKE_BINPATH, config.CMAKE_DEFAULT_PARAMS)
if len(config.CMAKE_PRE_VARS):
cmake = config.CMAKE_PRE_VARS + ' ' + cmake
# then go to build_dir
os.chdir(build_dir)
# then invoke cmake
ret = os.system("%s %s %s" %
(cmake, cmake_params, source_dir))
# on windows, we have to do this a second time (first time
# configures, second time generates)
if os.name == 'nt':
ret = os.system("%s %s %s" %
(cmake, cmake_params, source_dir))
return ret
def copy_glob(src_glob, dst_dir):
"""Copy all files and dirs included by src_glob into the directory specified in dst_dir.
e.g. usage: copy_glob('/etc/*', '/backup/my_etc/')
"""
if not os.path.exists(dst_dir):
os.mkdir(dst_dir)
if not os.path.isdir(dst_dir):
raise RuntimeError('%s is not a directory.' % (dst_dir,))
for fn in glob.glob(src_glob):
if os.path.isdir(fn):
# copytree needs full path in srt and dst
# e.g. copytree('/build/dir/numpy', 'python/lib/site-packages/numpy')
shutil.copytree(fn,os.path.join(dst_dir,os.path.basename(fn)), symlinks=True)
else:
# shutil is clever enough to take a directory as destination
shutil.copy(fn, dst_dir)
def find_command_with_ver(name, command, ver_re):
"""Try to run command, use ver_re regular expression to parse for
the version string. This will print for example:
CVS: version 2.11 found.
@return: True if command found, False if not or if version could
not be parsed.
"""
retval = False
s,o = get_status_output(command)
if s:
msg2 = 'NOT FOUND!'
else:
mo = re.search(ver_re, o, re.MULTILINE)
if mo:
msg2 = 'version %s found.' % (mo.groups()[0],)
retval = True
else:
msg2 = 'could not extract version.'
output("%s: %s" % (name, msg2))
return retval
def find_files(start_dir, re_pattern='.*\.(pyd|dll)', exclude_pats=[]):
"""Recursively find all files (not directories) with filenames
matching given regular expression. Case is ignored.
@param start_dir: search starts in this directory
@param re_pattern: regular expression with which all found files
will be matched. example: re_pattern = '.*\.(pyd|dll)' will match
all filenames ending in pyd or dll.
@param exclude_pats: if filename (without directory) matches any
one of these patterns, do not include it in the list
@return: list of fully qualified filenames that satisfy the
pattern
"""
cpat = re.compile(re_pattern, re.IGNORECASE)
found_files = []
excluded_files = []
for dirpath, dirnames, filenames in os.walk(start_dir):
ndirpath = os.path.normpath(os.path.abspath(dirpath))
for fn in filenames:
if cpat.match(fn):
# see if fn does not satisfy one of the exclude
# patterns
exclude_fn = False
for exclude_pat in exclude_pats:
if re.match(exclude_pat, fn, re.IGNORECASE):
exclude_fn = True
break
if not exclude_fn:
found_files.append(os.path.join(ndirpath,fn))
else:
excluded_files.append(os.path.join(ndirpath,fn))
return found_files, excluded_files
def get_status_output(command):
"""Run command, return output of command and exit code in status.
In general, status is None for success and 1 for command not
found.
"""
ph = os.popen(command)
output = ph.read()
status = ph.close()
return (status, output)
def output(message, rpad=0, rpad_char='#'):
s = "#####J> %s" % (message,)
pn = rpad - len(s)
if pn < 0:
pn = 0
p = pn * rpad_char
print "%s %s" % (s,p)
# flush the buffer, else things are out of sync in any log files
sys.stdout.flush()
def error(message):
raise RuntimeError('!!!!! %s' % (message,))
def file_exists(posix_file, nt_file):
"""Used to perform platform-specific file existence check.
"""
if os.name == 'posix':
fn = posix_file
else: # os.name == 'nt'
fn = nt_file
return os.path.exists(fn)
def human_size(num):
"""Method to convert number of bytes to human-readable version.
Code from http://blogmag.net/blog/read/38/Print_human_readable_file_size
"""
for x in ['bytes','KB','MB','GB','TB']:
if num < 1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
def make_command(solution_file, install=False, project=None,
win_buildtype=None):
"""Install packages can use this method to invoke the
platform-specific compile command. This can only be run after
config.init() has run.
@param solution_file: only used on Windows, ignored on *ix.
@param install: if true, invokes the make command to install the
built project.
@param project: Only build the named project on Windows. This
overrides the install setting!
@param win_buildtype: change the buildtype on windows, default
value is None, which gets translated to the value of
config.BUILD_TARGET.
"""
if os.name == 'posix':
if install:
make_command = '%s install' % (config.MAKE,)
else:
make_command = config.MAKE
else: # os.name == 'nt'
if install:
prj = 'INSTALL'
else:
prj = 'ALL_BUILD'
if project:
prj = project
if win_buildtype:
buildtype = win_buildtype
else:
buildtype = config.BUILD_TARGET
make_command = config.MAKE % \
(solution_file, prj, buildtype, buildtype)
return os.system(make_command)
def urlget(url, output_filename=None):
"""Simple method to retrieve URL. It will get the file in the current
directory.
If urlget guesses the wrong download filename based on the URL, pass
the output_filename parameter.
FIXME: this does not trap 404 errors. Seems the best way to do this is
to override FancyURLOpener with a new http_error_default
"""
def reporthook(blocknum, blocksize, totalsize):
current_size = blocknum * blocksize
current_size_kb = int(current_size / 1024.0)
sys.stdout.write(
'% 4.0f %% (%d Kbytes) downloaded\r' %
(current_size / float(totalsize) * 100.0, current_size_kb))
if output_filename:
filename = output_filename
else:
i = url.rfind('/')
filename = url[i+1:]
print url, "->", filename
if os.path.exists(filename):
output("%s already present, skipping download." % (filename,))
else:
urllib.urlretrieve(url, filename, reporthook)
sys.stdout.write("\n")
output("Download complete.")
return filename
def goto_archive():
os.chdir(config.archive_dir)
def goto_build():
os.chdir(config.build_dir)
def goto_inst():
os.chdir(config.inst_dir)
def unpack(archive_filename):
"""Unpacks given archive_filename in the current directory. It is
the caller's responsibility to make sure the current directory is
the desired destination.
It's preferable to make use of wrapper methods such as
unpack_build and unpack_install.
"""
tar = None
zip = None
if archive_filename.lower().endswith('bz2'):
m = 'r|bz2'
tar = tarfile.open(archive_filename, m)
elif archive_filename.lower().endswith('gz'):
m = 'r|gz'
tar = tarfile.open(archive_filename, m)
else:
zip = zipfile.ZipFile(archive_filename)
if tar:
# extractall is from python 2.5 onwards
# tar.extractall()
# we use a form that works on previous versions as well
for tarinfo in tar:
print tarinfo.name
tar.extract(tarinfo)
tar.close()
else:
for zipinfo in zip.infolist():
# first check if we need to create the directory housing
# the file
dn = os.path.dirname(zipinfo.filename)
if dn and not os.path.isdir(dn):
os.makedirs(dn)
# we only extract the file if it's not purely a directory
if not os.path.isdir(zipinfo.filename):
print "%s - %s" % (zipinfo.filename, \
human_size(zipinfo.file_size))
# have to write this in binary mode, else we screw up
# binaries (EXEs and such) quite badly. :)
f = open(zipinfo.filename, 'wb')
f.write(zip.read(zipinfo.filename))
f.close()
zip.close()
def unpack_archive(archive_filename):
"""Unpack given archive_filename in the archive (sources) directory.
"""
goto_archive()
unpack(archive_filename)
def unpack_build(archive_filename):
"""Unpack given archive_filename in build directory.
"""
goto_build()
unpack(archive_filename)
def unpack_inst(archive_filename):
"""Unpack given archive_filename in installation directory.
"""
goto_inst()
unpack(archive_filename)
def re_sub_filter_file(repls, filename):
"""Given a list of repls (tuples with regular expresions and
replacement patterns that are used as the first and second params
of re.sub), filter filename line by line.
A backup of the file is made to filename.orig.
"""
newfilename = '%s.new' % (filename,)
origfilename = '%s.orig' % (filename,)
shutil.copyfile(filename, origfilename)
ifile = file(filename)
ofile = file(newfilename, 'w')
for l in ifile:
for r in repls:
l = re.sub(r[0], r[1], l)
ofile.write(l)
ifile.close()
ofile.close()
shutil.copyfile(newfilename, filename)
os.unlink(newfilename)
os.unlink(origfilename)
def execute_in_vs_environment(post_commands, pre_commands='', communicate=''):
""" Executes the specified commands as if from the Visual Studio
command prompt. "vcvarsall.bat" needs to be on the PATH for this.
post_commands: Commands executed after setting up the environment.
This should be one string (separate using '&').
pre_commands: Executed before setting the environment.
communicate: Command sent to stdin after post_commands.
"""
if config.WINARCH == '64bit':
astr = 'amd64'
else:
astr = 'x86'
if pre_commands:
if pre_commands[-1] != '&':
pre_commands += '&'
if post_commands:
if post_commands[0] != '&':
post_commands = '&' + post_commands
p = subprocess.Popen('%s%s %s%s' % (
pre_commands,
"vcvarsall.bat",
astr,
post_commands),
shell=True, stdin=subprocess.PIPE)
if communicate:
p.communicate(communicate)
return p.wait()
| [
[
1,
0,
0.0126,
0.0025,
0,
0.66,
0,
308,
0,
1,
0,
0,
308,
0,
0
],
[
1,
0,
0.0151,
0.0025,
0,
0.66,
0.0357,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.0176,
0.0025,
0,
... | [
"import config",
"import glob",
"import os",
"import re",
"import sys, urllib",
"import shutil",
"import tarfile",
"import zipfile",
"import subprocess",
"def cmake_command(build_dir, source_dir, cmake_params):\n \"\"\"Invoke correct cmake commands to configure a build directory.\n\n @param ... |
#!/usr/bin/env python
#
# $Id: setup.py,v 1.11 2005/02/15 16:32:22 warnes Exp $
CVS=0
from distutils.core import setup, Command, Extension
from SOAPpy.version import __version__
url="http://pywebsvcs.sf.net/"
long_description="SOAPpy provides tools for building SOAP clients and servers. For more information see " + url
if CVS:
import time
__version__ += "_CVS_" + time.strftime('%Y_%m_%d')
setup(name="SOAPpy",
version=__version__,
description="SOAP Services for Python",
maintainer="Gregory Warnes",
maintainer_email="Gregory.R.Warnes@Pfizer.com",
url = url,
long_description=long_description,
packages=['SOAPpy','SOAPpy/wstools']
)
| [
[
14,
0,
0.1724,
0.0345,
0,
0.66,
0,
504,
1,
0,
0,
0,
0,
1,
0
],
[
1,
0,
0.2414,
0.0345,
0,
0.66,
0.1667,
152,
0,
3,
0,
0,
152,
0,
0
],
[
1,
0,
0.2759,
0.0345,
0,
0... | [
"CVS=0",
"from distutils.core import setup, Command, Extension",
"from SOAPpy.version import __version__",
"url=\"http://pywebsvcs.sf.net/\"",
"long_description=\"SOAPpy provides tools for building SOAP clients and servers. For more information see \" + url",
"if CVS:\n import time\n __version__ +=... |
#!/usr/bin/env python
ident = '$Id: whoisTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
server = SOAPProxy("http://www.SoapClient.com/xml/SQLDataSoap.WSDL",
http_proxy=proxy)
print "whois>>", server.ProcessSRL(SRLFile="WHOIS.SRI",
RequestName="whois",
key = "microsoft.com")
| [
[
1,
0,
0.25,
0.25,
0,
0.66,
0,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.5,
0.25,
0,
0.66,
0.5,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.75,
0.25,
0,
0.66,
1,
... | [
"import os, re",
"import sys",
"from SOAPpy import SOAPProxy"
] |
#!/usr/bin/env python
ident = '$Id: BabelfishWSDLTest.py,v 1.1 2003/07/18 15:58:28 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import WSDL
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
server = WSDL.Proxy('http://www.xmethods.net/sd/2001/BabelFishService.wsdl',
http_proxy=proxy)
english = "Hi Friend!"
print "Babelfish Translations"
print "------------------------"
print "English: '%s'" % english
print "French: '%s'" % server.BabelFish('en_fr',english)
print "Spanish: '%s'" % server.BabelFish('en_es',english)
print "Italian: '%s'" % server.BabelFish('en_it',english)
print "German: '%s'" % server.BabelFish('en_de',english)
print "Done."
| [
[
14,
0,
0.0938,
0.0312,
0,
0.66,
0,
977,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1562,
0.0312,
0,
0.66,
0.0667,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.1875,
0.0312,
0,
0... | [
"ident = '$Id: BabelfishWSDLTest.py,v 1.1 2003/07/18 15:58:28 warnes Exp $'",
"import os, re",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import WSDL",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n proxy = \"%s:... |
import sys
sys.path.insert(1, "..")
import SOAPpy
import time
dep = SOAPpy.dateTimeType((2004, 3, 24, 12, 30, 59, 4, 86, 0))
ret = SOAPpy.dateTimeType((2004, 3, 26, 12, 30, 59, 4, 86, 0))
in0 = SOAPpy.structType()
in0._addItem('outwardDate', dep)
in0._addItem('returnDate', ret)
in0._addItem('originAirport', 'den')
in0._addItem('destinationAirport', 'iad')
x = SOAPpy.buildSOAP(
in0,
method="getAirFareQuote",
namespace="urn:SBGAirFareQuotes.sbg.travel.ws.dsdata.co.uk"
)
wsdl = 'http://www.xmethods.net/sd/2001/TemperatureService.wsdl'
proxy = SOAPpy.WSDL.Proxy(wsdl)
| [
[
1,
0,
0.0385,
0.0385,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.0769,
0.0385,
0,
0.66,
0.0769,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.1538,
0.0385,
0,
0.... | [
"import sys",
"sys.path.insert(1, \"..\")",
"import SOAPpy",
"import time",
"dep = SOAPpy.dateTimeType((2004, 3, 24, 12, 30, 59, 4, 86, 0))",
"ret = SOAPpy.dateTimeType((2004, 3, 26, 12, 30, 59, 4, 86, 0))",
"in0 = SOAPpy.structType()",
"in0._addItem('outwardDate', dep)",
"in0._addItem('returnDate',... |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: xmethods.py,v 1.4 2003/12/18 06:31:50 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
print "##########################################"
print " SOAP services registered at xmethods.net"
print "##########################################"
server = SOAPProxy("http://www.xmethods.net/interfaces/query",
namespace = 'urn:xmethods-delayed-quotes',
http_proxy=proxy)
names = server.getAllServiceNames()
for item in names:
print 'name:', item['name']
print 'id :', item['id']
print
| [
[
14,
0,
0.1176,
0.0294,
0,
0.66,
0,
977,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1765,
0.0294,
0,
0.66,
0.0909,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.2059,
0.0294,
0,
0... | [
"ident = '$Id: xmethods.py,v 1.4 2003/12/18 06:31:50 warnes Exp $'",
"import os, re",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import SOAPProxy",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n proxy = \"%s:%s\"... |
#!/usr/bin/env python
ident = '$Id: newsTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
SoapEndpointURL = 'http://www22.brinkster.com/prasads/BreakingNewsService.asmx?WSDL'
MethodNamespaceURI = 'http://tempuri.org/'
# Three ways to do namespaces, force it at the server level
server = SOAPProxy(SoapEndpointURL, namespace = MethodNamespaceURI,
soapaction='http://tempuri.org/GetCNNNews', encoding = None,
http_proxy=proxy)
print "[server level CNN News call]"
print server.GetCNNNews()
# Do it inline ala SOAP::LITE, also specify the actually ns (namespace) and
# sa (soapaction)
server = SOAPProxy(SoapEndpointURL, encoding = None)
print "[inline CNNNews call]"
print server._ns('ns1',
MethodNamespaceURI)._sa('http://tempuri.org/GetCNNNews').GetCNNNews()
# Create an instance of your server with specific namespace and then use
# inline soapactions for each call
dq = server._ns(MethodNamespaceURI)
print "[namespaced CNNNews call]"
print dq._sa('http://tempuri.org/GetCNNNews').GetCNNNews()
print "[namespaced CBSNews call]"
print dq._sa('http://tempuri.org/GetCBSNews').GetCBSNews()
| [
[
14,
0,
0.0682,
0.0227,
0,
0.66,
0,
977,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1136,
0.0227,
0,
0.66,
0.0588,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.1364,
0.0227,
0,
0... | [
"ident = '$Id: newsTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'",
"import os, re",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import SOAPProxy",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n proxy = \"%s:%s\"... |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: translateTest.py,v 1.5 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
server = SOAPProxy("http://services.xmethods.com:80/perl/soaplite.cgi",
http_proxy=proxy)
babel = server._ns('urn:xmethodsBabelFish#BabelFish')
print babel.BabelFish(translationmode = "en_fr",
sourcedata = "The quick brown fox did something or other")
| [
[
14,
0,
0.1739,
0.0435,
0,
0.66,
0,
977,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2609,
0.0435,
0,
0.66,
0.1429,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.3043,
0.0435,
0,
0... | [
"ident = '$Id: translateTest.py,v 1.5 2003/05/21 14:52:37 warnes Exp $'",
"import os, re",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import SOAPProxy",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n proxy = \"%s... |
import sys
sys.path.insert(1, "..")
from SOAPpy import *
one = typedArrayType(data=[1],typed=type(1))
tmp = typedArrayType(data=[], typed=type(1))
print buildSOAP( one )
print buildSOAP( tmp )
| [
[
1,
0,
0.125,
0.125,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.25,
0.125,
0,
0.66,
0.1667,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.375,
0.125,
0,
0.66,
... | [
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"one = typedArrayType(data=[1],typed=type(1))",
"tmp = typedArrayType(data=[], typed=type(1))",
"print(buildSOAP( one ))",
"print(buildSOAP( tmp ))"
] |
#!/usr/bin/python2
#standard imports
import syslog, sys
#domain specific imports
sys.path.insert (1, '..')
import SOAPpy
SOAPpy.Config.simplify_objects=1
## def test_integer(self,pass_integer):
## def test_string(self,pass_string):
## def test_float(self,pass_float):
## def test_tuple(self,pass_tuple):
## def test_list(self,pass_list):
## def test_dictionary(self,pass_dictionary):
if __name__ == "__main__":
server = SOAPpy.SOAPProxy("http://localhost:9999")
original_integer = 5
result_integer = server.test_integer(original_integer)
print "original_integer %s" % original_integer
print "result_integer %s" % result_integer
assert(result_integer==original_integer)
print
original_string = "five"
result_string = server.test_string(original_string)
print "original_string %s" % original_string
print "result_string %s" % result_string
assert(result_string==original_string)
print
original_float = 5.0
result_float = server.test_float(original_float)
print "original_float %s" % original_float
print "result_float %s" % result_float
assert(result_float==original_float)
print
original_tuple = (1,2,"three","four",5)
result_tuple = server.test_tuple(original_tuple)
print "original_tuple %s" % str(original_tuple)
print "result_tuple %s" % str(result_tuple)
assert(tuple(result_tuple)==original_tuple)
print
original_list = [5,4,"three",2,1]
result_list = server.test_list(original_list)
print "original_list %s" % original_list
print "result_list %s" % result_list
assert(result_list==original_list)
print
original_dictionary = {
'one': 1,
"two": 2,
"three": 3,
"four": 4,
"five": 5,
}
result_dictionary = server.test_dictionary(original_dictionary)
print "original_dictionary %s" % original_dictionary
print "result_dictionary %s" % result_dictionary
assert(result_dictionary==original_dictionary)
print
server.quit()
| [
[
1,
0,
0.25,
0.25,
0,
0.66,
0,
76,
0,
2,
0,
0,
76,
0,
0
],
[
1,
0,
0.75,
0.25,
0,
0.66,
1,
181,
0,
1,
0,
0,
181,
0,
0
]
] | [
"import syslog, sys",
"import SOAPpy"
] |
import sys
sys.path.insert(1, "..")
import SOAPpy
url = 'http://www.xmethods.org/sd/2001/TemperatureService.wsdl'
zip = '06340'
proxy = SOAPpy.WSDL.Proxy(url)
temp = proxy.getTemp(zip)
print 'Temperature at', zip, 'is', temp
| [
[
1,
0,
0.1,
0.1,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.3,
0.1,
0,
0.66,
0.1429,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.4,
0.1,
0,
0.66,
0.2857,
... | [
"import sys",
"sys.path.insert(1, \"..\")",
"import SOAPpy",
"url = 'http://www.xmethods.org/sd/2001/TemperatureService.wsdl'",
"zip = '06340'",
"proxy = SOAPpy.WSDL.Proxy(url)",
"temp = proxy.getTemp(zip)",
"print('Temperature at', zip, 'is', temp)"
] |
"""
Check handing of unicode.
"""
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1
#Config.dumpHeadersIn = 1
#Config.dumpSOAPIn = 1
#Config.dumpSOAPOut = 1
# ask for returned SOAP responses to be converted to basic python types
Config.simplify_objects = 0
#Config.BuildWithNoType = 1
#Config.BuildWithNoNamespacePrefix = 1
server = SOAPProxy("http://localhost:9900/")
x = u'uMOO' # Single unicode string
y = server.echo_simple((x,))
assert( x==y[0] )
x = [u'uMoo1',u'uMoo2'] # array of unicode strings
y = server.echo_simple(x)
assert( x[0] == y[0] )
assert( x[1] == y[1] )
x = {
u'A':1,
u'B':u'B',
'C':u'C',
'D':'D'
}
y = server.echo_simple(x)
for key in x.keys():
assert( x[key] == y[0][key] )
print "Success"
| [
[
8,
0,
0.0465,
0.0698,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1163,
0.0233,
0,
0.66,
0.0769,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.1395,
0.0233,
0,
0.66... | [
"\"\"\"\nCheck handing of unicode.\n\"\"\"",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"Config.simplify_objects = 0",
"server = SOAPProxy(\"http://localhost:9900/\")",
"x = u'uMOO' # Single unicode string",
"y = server.echo_simple((x,))",
"x = [u'uMoo1',u'uMoo2'] # array of... |
from SOAPpy import WSDL
server = WSDL.Proxy('/home/warneg/src/google/googleapi/GoogleSearch.wsdl')
key = "6k0oDPZQFHL0zpjy6ZO6ufUVFKBgvqTo"
results = server.doGoogleSearch(key, 'warnes', 0, 10, False, "",
False, "", "utf-8", "utf-8")
for i in range(len(results.resultElements)):
res = results.resultElements[i]
print '%d: %s --> %s' % ( i, res.title, res.URL )
| [
[
1,
0,
0.0909,
0.0909,
0,
0.66,
0,
181,
0,
1,
0,
0,
181,
0,
0
],
[
14,
0,
0.1818,
0.0909,
0,
0.66,
0.25,
268,
3,
1,
0,
0,
363,
10,
1
],
[
14,
0,
0.2727,
0.0909,
0,
... | [
"from SOAPpy import WSDL",
"server = WSDL.Proxy('/home/warneg/src/google/googleapi/GoogleSearch.wsdl')",
"key = \"6k0oDPZQFHL0zpjy6ZO6ufUVFKBgvqTo\"",
"results = server.doGoogleSearch(key, 'warnes', 0, 10, False, \"\", \n False, \"\", \"utf-8\", \"utf-8\")",
"for i in range(le... |
#!/usr/bin/env python
import unittest
import os, re
import sys
sys.path.insert (1, '..')
import SOAPpy
ident = '$Id: testWSDL.py,v 1.2 2003/05/09 12:46:11 warnes Exp $'
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
http_proxy = "%s:%s" % (phost, pport)
except:
http_proxy = None
class IntegerArithmenticTestCase(unittest.TestCase):
def setUp(self):
self.wsdlstr1 = '''<?xml version="1.0"?>
<definitions name="TemperatureService" targetNamespace="http://www.xmethods.net/sd/TemperatureService.wsdl" xmlns:tns="http://www.xmethods.net/sd/TemperatureService.wsdl" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" xmlns="http://schemas.xmlsoap.org/wsdl/">
<message name="getTempRequest">
<part name="zipcode" type="xsd:string"/>
</message>
<message name="getTempResponse">
<part name="return" type="xsd:float"/>
</message>
<portType name="TemperaturePortType">
<operation name="getTemp">
<input message="tns:getTempRequest"/>
<output message="tns:getTempResponse"/>
</operation>
</portType>
<binding name="TemperatureBinding" type="tns:TemperaturePortType">
<soap:binding style="rpc" transport="http://schemas.xmlsoap.org/soap/http"/>
<operation name="getTemp">
<soap:operation soapAction=""/>
<input>
<soap:body use="encoded" namespace="urn:xmethods-Temperature" encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"/>
</input>
<output>
<soap:body use="encoded" namespace="urn:xmethods-Temperature" encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"/>
</output>
</operation>
</binding>
<service name="TemperatureService">
<documentation>Returns current temperature in a given U.S. zipcode </documentation>
<port name="TemperaturePort" binding="tns:TemperatureBinding">
<soap:address location="http://services.xmethods.net:80/soap/servlet/rpcrouter"/>
</port>
</service>
</definitions>
'''
def testParseWsdlString(self):
'''Parse XMethods TemperatureService wsdl from a string.'''
wsdl = SOAPpy.WSDL.Proxy(self.wsdlstr1, http_proxy=http_proxy)
self.assertEquals(len(wsdl.methods), 1)
method = wsdl.methods.values()[0]
self.assertEquals(method.methodName, 'getTemp')
self.assertEquals(method.namespace, 'urn:xmethods-Temperature')
self.assertEquals(method.location,
'http://services.xmethods.net:80/soap/servlet/rpcrouter')
def testParseWsdlFile(self):
'''Parse XMethods TemperatureService wsdl from a file.'''
# figure out path to the test directory
dir = os.path.abspath('.')
fname = './TemperatureService.wsdl'
try:
f = file(fname)
except (IOError, OSError):
self.assert_(0, 'Cound not find wsdl file "%s"' % file)
wsdl = SOAPpy.WSDL.Proxy(fname, http_proxy=http_proxy)
self.assertEquals(len(wsdl.methods), 1)
method = wsdl.methods.values()[0]
self.assertEquals(method.methodName, 'getTemp')
self.assertEquals(method.namespace, 'urn:xmethods-Temperature')
self.assertEquals(method.location,
'http://services.xmethods.net:80/soap/servlet/rpcrouter')
def testParseWsdlUrl(self):
'''Parse XMethods TemperatureService wsdl from a url.'''
wsdl = SOAPpy.WSDL.Proxy('http://www.xmethods.net/sd/2001/TemperatureService.wsdl', http_proxy=http_proxy)
self.assertEquals(len(wsdl.methods), 1)
method = wsdl.methods.values()[0]
self.assertEquals(method.methodName, 'getTemp')
self.assertEquals(method.namespace, 'urn:xmethods-Temperature')
self.assertEquals(method.location,
'http://services.xmethods.net:80/soap/servlet/rpcrouter')
def testGetTemp(self):
'''Parse TemperatureService and call getTemp.'''
zip = '01072'
proxy = SOAPpy.WSDL.Proxy(self.wsdlstr1, http_proxy=http_proxy)
temp = proxy.getTemp(zip)
print 'Temperature at', zip, 'is', temp
if __name__ == '__main__':
unittest.main()
| [
[
1,
0,
0.0268,
0.0089,
0,
0.66,
0,
88,
0,
1,
0,
0,
88,
0,
0
],
[
1,
0,
0.0357,
0.0089,
0,
0.66,
0.125,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.0446,
0.0089,
0,
0.6... | [
"import unittest",
"import os, re",
"import sys",
"sys.path.insert (1, '..')",
"import SOAPpy",
"ident = '$Id: testWSDL.py,v 1.2 2003/05/09 12:46:11 warnes Exp $'",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n http_proxy =... |
#!/usr/bin/env python
ident = '$Id: speedTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import time
import sys
sys.path.insert(1, "..")
x='''<SOAP-ENV:Envelope
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/1999/XMLSchema">
<SOAP-ENV:Body>
<ns1:getRate xmlns:ns1="urn:demo1:exchange" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<country1 xsi:type="xsd:string">USA</country1>
<country2 xsi:type="xsd:string">japan</country2>
</ns1:getRate>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>'''
x2='''<SOAP-ENV:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0 http://schemas.xmlsoap.org/soap/encoding/" xmlns:i3="http://soapinterop.org/xsd" xmlns:i2="http://soapinterop.org/">
<SOAP-ENV:Body>
<i2:echoStructArray id="ref-1">
<return href="#ref-4"/>
</i2:echoStructArray>
<SOAP-ENC:Array id="ref-4" SOAP-ENC:arrayType="i3:SOAPStruct[3]">
<item href="#ref-5"/>
<item href="#ref-6"/>
<item href="#ref-7"/>
</SOAP-ENC:Array>
<i3:SOAPStruct id="ref-5">
<varString xsi:type="xsd:string">West Virginia</varString>
<varInt xsi:type="xsd:int">-546</varInt>
<varFloat xsi:type="xsd:float">-5.398</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-6">
<varString xsi:type="xsd:string">New Mexico</varString>
<varInt xsi:type="xsd:int">-641</varInt>
<varFloat xsi:type="xsd:float">-9.351</varFloat>
</i3:SOAPStruct>
<i3:SOAPStruct id="ref-7">
<varString xsi:type="xsd:string">Missouri</varString>
<varInt xsi:type="xsd:int">-819</varInt>
<varFloat xsi:type="xsd:float">1.495</varFloat>
</i3:SOAPStruct>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
# Import in function, because for some reason they slow each other
# down in same namespace ???
def SOAPParse(inxml):
from SOAPpy import parseSOAPRPC
t= time.time()
parseSOAPRPC(inxml)
return time.time()-t
def SAXParse(inxml):
import xml.sax
y = xml.sax.handler.ContentHandler()
t= time.time()
xml.sax.parseString(inxml,y)
return time.time()-t
def DOMParse(inxml):
import xml.dom.minidom
t= time.time()
xml.dom.minidom.parseString(inxml)
return time.time()-t
# Wierd but the SAX parser runs really slow the first time.
# Probably got to load a c module or something
SAXParse(x)
print
print "Simple XML"
print "SAX Parse, no marshalling ", SAXParse(x)
print "SOAP Parse, and marshalling ", SOAPParse(x)
print "DOM Parse, no marshalling ", DOMParse(x)
print
print "Complex XML (references)"
print "SAX Parse, no marshalling ", SAXParse(x2)
print "SOAP Parse, and marshalling ", SOAPParse(x2)
print "DOM Parse, no marshalling ", DOMParse(x2)
| [
[
1,
0,
0.0476,
0.0476,
0,
0.66,
0,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0952,
0.0476,
0,
0.66,
0.25,
509,
0,
1,
0,
0,
509,
0,
0
],
[
2,
0,
0.2381,
0.2381,
0,
0.... | [
"import time",
"import sys",
"def SOAPParse(inxml):\n from SOAPpy import parseSOAPRPC\n t= time.time()\n parseSOAPRPC(inxml)\n return time.time()-t",
" from SOAPpy import parseSOAPRPC",
" t= time.time()",
" parseSOAPRPC(inxml)",
" return time.time()-t",
"def SAXParse(inxml):\... |
#!/usr/bin/python
import sys
sys.path.insert(1, "..")
import SOAPpy
import time
import gc
import types
gc.set_debug(gc.DEBUG_SAVEALL)
for i in range(400):
try:
t = SOAPpy.SOAP.parseSOAPRPC('bad soap payload')
except: pass
gc.collect()
if len(gc.garbage):
print 'still leaking'
else:
print 'no leak'
| [
[
1,
0,
0.1429,
0.0476,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.1905,
0.0476,
0,
0.66,
0.1111,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.2381,
0.0476,
0,
0.... | [
"import sys",
"sys.path.insert(1, \"..\")",
"import SOAPpy",
"import time",
"import gc",
"import types",
"gc.set_debug(gc.DEBUG_SAVEALL)",
"for i in range(400):\n try:\n t = SOAPpy.SOAP.parseSOAPRPC('bad soap payload') \n except: pass",
" try:\n t = SOAPpy.SOAP.parseSOAPRPC('b... |
#!/usr/bin/python2
#standard imports
import syslog, sys
#domain specific imports
sys.path.insert (1, '..')
import SOAPpy
class test_service:
run = 1
def test_integer(self,pass_integer):
print type(pass_integer)
return pass_integer
def test_string(self,pass_string):
print type(pass_string)
return pass_string
def test_float(self,pass_float):
print type(pass_float)
return pass_float
def test_tuple(self,pass_tuple):
print type(pass_tuple), pass_tuple
return pass_tuple
def test_list(self,pass_list):
print type(pass_list), pass_list
return pass_list
def test_dictionary(self,pass_dictionary):
print type(pass_dictionary), pass_dictionary
return pass_dictionary
def quit(self):
self.run = 0
server = SOAPpy.SOAPServer(("localhost",9999))
SOAPpy.Config.simplify_objects=1
access_object = test_service()
server.registerObject(access_object)
while access_object.run:
server.handle_request()
| [
[
1,
0,
0.0833,
0.0208,
0,
0.66,
0,
76,
0,
2,
0,
0,
76,
0,
0
],
[
8,
0,
0.1458,
0.0208,
0,
0.66,
0.125,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.1667,
0.0208,
0,
0.66,... | [
"import syslog, sys",
"sys.path.insert (1, '..')",
"import SOAPpy",
"class test_service:\n\n run = 1\n \n def test_integer(self,pass_integer):\n print(type(pass_integer))\n return pass_integer",
" run = 1",
" def test_integer(self,pass_integer):\n print(type(pass_intege... |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
import sys
sys.path.insert (1, '..')
from SOAPpy import *
ident = '$Id: cardClient.py,v 1.4 2004/02/18 21:22:13 warnes Exp $'
endpoint = "http://localhost:12027/xmethodsInterop"
sa = "urn:soapinterop"
ns = "http://soapinterop.org/"
serv = SOAPProxy(endpoint, namespace=ns, soapaction=sa)
try: hand = serv.dealHand(NumberOfCards = 13, StringSeparator = '\n')
except: print "no dealHand"; hand = 0
try: sortedhand = serv.dealArrangedHand(NumberOfCards=13,StringSeparator='\n')
except: print "no sorted"; sortedhand = 0
try: card = serv.dealCard()
except: print "no card"; card = 0
print "*****hand****\n",hand,"\n*********"
print "******sortedhand*****\n",sortedhand,"\n*********"
print "card:",card
serv.quit()
| [
[
1,
0,
0.25,
0.25,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.75,
0.25,
0,
0.66,
1,
181,
0,
1,
0,
0,
181,
0,
0
]
] | [
"import sys",
"from SOAPpy import *"
] |
#!/usr/bin/env python
import sys, unittest
sys.path.insert(1, "..")
from SOAPpy import *
Config.debug=1
class ClientTestCase(unittest.TestCase):
def testParseRules(self):
x = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Body
soap:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<SomeMethod>
<Result>
<Book>
<title>My Life and Work</title>
</Book>
<Person>
<name>Henry Ford</name>
<age> 49 </age>
<height> 5.5 </height>
</Person>
</Result>
</SomeMethod>
</soap:Body>
</soap:Envelope>
"""
def negfloat(x):
return float(x) * -1.0
# parse rules
pr = {'SomeMethod':
{'Result':
{
'Book': {'title':'string'},
'Person': {'age':'int',
'height':negfloat}
}
}
}
y = parseSOAPRPC(x, rules=pr)
assert y.Result.Person.age == 49
assert y.Result.Person.height == -5.5
x = '''<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<item>12</item>
<item>23</item>
<item>0</item>
<item>-31</item>
</param>
<param1 xsi:null="1"></param1>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
# parse rules
pr = {'Bounds':
{'param': 'arrayType=string[]',
}
}
pr2 = {'Bounds':
{'param': 'arrayType=int[4]',
}
}
y = parseSOAPRPC(x, rules=pr)
assert y.param[1]=='23'
y = parseSOAPRPC(x, rules=pr2)
assert y.param[1]==23
x = '''<SOAP-ENV:Envelope
SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsd="http://www.w3.org/1999/XMLSchema"
xmlns:xsi="http://www.w3.org/1999/XMLSchema-instance">
<SOAP-ENV:Body>
<Bounds>
<param>
<item xsi:type="xsd:int">12</item>
<item xsi:type="xsd:string">23</item>
<item xsi:type="xsd:float">0</item>
<item xsi:type="xsd:int">-31</item>
</param>
<param1 xsi:null="1"></param1>
</Bounds>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
'''
pr = {'Bounds':
{'param': 'arrayType=ur-type[]'
}
}
y = parseSOAPRPC(x, rules=pr)
assert y.param[0]==12
assert y.param[1]=='23'
assert y.param[2]==float(0)
assert y.param[3]==-31
# Try the reverse, not implemented yet.
def testBuildObject(self):
class Book(structType):
def __init__(self):
self.title = "Title of a book"
class Person(structType):
def __init__(self):
self.age = "49"
self.height = "5.5"
class Library(structType):
def __init__(self):
self._name = "Result"
self.Book = Book()
self.Person = Person()
obj = Library()
x = buildSOAP( kw={'Library':obj} )
print(x)
if __name__ == '__main__':
unittest.main()
| [
[
1,
0,
0.0204,
0.0068,
0,
0.66,
0,
509,
0,
2,
0,
0,
509,
0,
0
],
[
8,
0,
0.0272,
0.0068,
0,
0.66,
0.2,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.034,
0.0068,
0,
0.66,
... | [
"import sys, unittest",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"Config.debug=1",
"class ClientTestCase(unittest.TestCase):\n def testParseRules(self):\n x = \"\"\"<?xml version=\"1.0\" encoding=\"utf-8\"?>\n <soap:Envelope xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope... |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: quoteTest.py,v 1.5 2003/12/18 06:31:50 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
# Three ways to do namespaces, force it at the server level
server = SOAPProxy("http://services.xmethods.com:9090/soap",
namespace = 'urn:xmethods-delayed-quotes',
http_proxy=proxy)
print "IBM>>", server.getQuote(symbol = 'IBM')
# Do it inline ala SOAP::LITE, also specify the actually ns
server = SOAPProxy("http://services.xmethods.com:9090/soap",
http_proxy=proxy)
print "IBM>>", server._ns('ns1',
'urn:xmethods-delayed-quotes').getQuote(symbol = 'IBM')
# Create a namespaced version of your server
dq = server._ns('urn:xmethods-delayed-quotes')
print "IBM>>", dq.getQuote(symbol='IBM')
print "ORCL>>", dq.getQuote(symbol='ORCL')
print "INTC>>", dq.getQuote(symbol='INTC')
| [
[
14,
0,
0.1053,
0.0263,
0,
0.66,
0,
977,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1579,
0.0263,
0,
0.66,
0.0833,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.1842,
0.0263,
0,
0... | [
"ident = '$Id: quoteTest.py,v 1.5 2003/12/18 06:31:50 warnes Exp $'",
"import os, re",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import SOAPProxy",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n proxy = \"%s:%s\... |
import sys
sys.path.insert(1, "..")
from SOAPpy import *
detailed_fault = \
"""
<?xml version="1.0" encoding="UTF-8"?>
<SOAP-ENV:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/" xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.microsoft.com/soap/encoding/clr/1.0 http://schemas.xmlsoap.org/soap/encoding/" xmlns:a1="http://schemas.microsoft.com/clr/ns/System.Runtime.Serialization.Formatters">
<SOAP-ENV:Body>
<SOAP-ENV:Fault id="ref-1">
<faultcode>soapenv:Server.generalException</faultcode>
<faultstring>Exception thrown on Server</faultstring>
<detail>
<loginFailureFault href="#id0"/>
<exceptionName xsi:type="xsd:string">...</exceptionName>
</detail>
</SOAP-ENV:Fault>
<multiRef id="id0">
<description xsi:type="xsd:string">Login failure (504):Unknown User</description>
<module xsi:type="xsd:string"> ... </module>
<timestamp xsi:type="xsd:string">...</timestamp>
<faultcode xsi:type="xsd:string"> ...</faultcode>
<parameter xsi:type="xsd:string"> ... </parameter>
</multiRef>
</SOAP-ENV:Body>
</SOAP-ENV:Envelope>
"""
z = parseSOAPRPC(detailed_fault.strip() )
assert(z.__class__==faultType)
assert(z.faultstring=="Exception thrown on Server")
assert(z.detail.loginFailureFault.description=='Login failure (504):Unknown User')
print "Success"
| [
[
1,
0,
0.0263,
0.0263,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.0526,
0.0263,
0,
0.66,
0.2,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.0789,
0.0263,
0,
0.66,... | [
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"detailed_fault = \\\n\"\"\"\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<SOAP-ENV:Envelope xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:SOAP-ENC=\"http://schemas.xmlsoap.org/so... |
#!/usr/bin/env python
ident = '$Id: weatherTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'
import os, re
import sys
sys.path.insert(1, "..")
from SOAPpy import SOAPProxy
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
SoapEndpointURL = 'http://services.xmethods.net:80/soap/servlet/rpcrouter'
MethodNamespaceURI = 'urn:xmethods-Temperature'
# Do it inline ala SOAP::LITE, also specify the actually ns
server = SOAPProxy(SoapEndpointURL, http_proxy=proxy)
print "inline", server._ns('ns1', MethodNamespaceURI).getTemp(zipcode='94063')
| [
[
14,
0,
0.12,
0.04,
0,
0.66,
0,
977,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2,
0.04,
0,
0.66,
0.1111,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.24,
0.04,
0,
0.66,
0.22... | [
"ident = '$Id: weatherTest.py,v 1.4 2003/05/21 14:52:37 warnes Exp $'",
"import os, re",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import SOAPProxy",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n proxy = \"%s:%... |
"""
Check handing of unicode.
"""
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1
#Config.dumpHeadersIn = 1
#Config.dumpSOAPIn = 1
#Config.dumpSOAPOut = 1
# ask for returned SOAP responses to be converted to basic python types
Config.simplify_objects = 1
#Config.BuildWithNoType = 1
#Config.BuildWithNoNamespacePrefix = 1
def headers():
'''Return a soap header containing all the needed information.'''
hd = Types.headerType()
hd.useragent = Types.stringType("foo")
return hd
server = SOAPProxy("http://localhost:9900/",header=headers())
adgroupid = 197497504
keyword1 = { 'status': 'Moderate',
'adGroupId': 197497504,
'destinationURL': None,
'language': '',
'text': 'does not work',
'negative': bool(0),
'maxCpc': 50000,
'type': 'Keyword',
'id': 1 }
keyword2 = { 'status': 'Moderate',
'adGroupId': 197497504,
'destinationURL': None,
'language': '',
'text': 'yes it does not',
'negative': bool(0),
'maxCpc': 50000,
'type': 'Keyword',
'id': 2 }
keylist = [keyword1, keyword2]
# Check that the data goes through properly
retval = server.echo_simple(adgroupid, keylist)
kw1 = retval[1][0]
kw2 = retval[1][1]
assert(retval[0] == adgroupid)
for key in kw1.keys():
assert(kw1[key]==keyword1[key])
for key in kw2.keys():
assert(kw2[key]==keyword2[key])
# Check that the header is preserved
retval = server.echo_header((adgroupid, keylist))
assert(retval[1].has_key('useragent'))
assert(retval[1]['useragent'] == 'foo')
server.quit()
print "Success!"
| [
[
8,
0,
0.0267,
0.04,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0667,
0.0133,
0,
0.66,
0.0556,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.08,
0.0133,
0,
0.66,
... | [
"\"\"\"\nCheck handing of unicode.\n\"\"\"",
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"Config.simplify_objects = 1",
"def headers():\n '''Return a soap header containing all the needed information.'''\n hd = Types.headerType()\n hd.useragent = Types.stringType(\"foo\")\n ret... |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
ident = '$Id: alanbushTest.py,v 1.5 2003/05/21 14:52:37 warnes Exp $'
import os, re,sys
# add local SOAPpy code to search path
sys.path.insert(1, "..")
from SOAPpy import *
Config.debug=0
# Check for a web proxy definition in environment
try:
proxy_url=os.environ['http_proxy']
phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)
proxy = "%s:%s" % (phost, pport)
except:
proxy = None
SoapEndpointURL = 'http://www.alanbushtrust.org.uk/soap/compositions.asp'
MethodNamespaceURI = 'urn:alanbushtrust-org-uk:soap.methods'
SoapAction = MethodNamespaceURI + ".GetCategories"
server = SOAPProxy(SoapEndpointURL,
namespace=MethodNamespaceURI,
soapaction=SoapAction,
http_proxy=proxy
)
for category in server.GetCategories():
print category
| [
[
14,
0,
0.1471,
0.0294,
0,
0.66,
0,
977,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2059,
0.0294,
0,
0.66,
0.1,
688,
0,
3,
0,
0,
688,
0,
0
],
[
8,
0,
0.2941,
0.0294,
0,
0.66... | [
"ident = '$Id: alanbushTest.py,v 1.5 2003/05/21 14:52:37 warnes Exp $'",
"import os, re,sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"Config.debug=0",
"try:\n proxy_url=os.environ['http_proxy']\n phost, pport = re.search('http://([^:]+):([0-9]+)', proxy_url).group(1,2)\n proxy = \"%s:... |
#!/usr/bin/env python
import sys
sys.path.insert(1, "..")
from SOAPpy import *
server = SOAPProxy("http://206.135.217.234:8000/")
server.COM_SetProperty("Visible", 1)
server.Workbooks.Open("c:\\test.xls")
server.COM_NestedCall('ActiveSheet.Range("A2").EntireRow.Delete()')
server.quit()
| [
[
1,
0,
0.1667,
0.0556,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.2222,
0.0556,
0,
0.66,
0.1429,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.3333,
0.0556,
0,
0.... | [
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"server = SOAPProxy(\"http://206.135.217.234:8000/\")",
"server.COM_SetProperty(\"Visible\", 1)",
"server.Workbooks.Open(\"c:\\\\test.xls\")",
"server.COM_NestedCall('ActiveSheet.Range(\"A2\").EntireRow.Delete()')",
"server.quit()"
] |
#!/usr/bin/env python
# Copyright (c) 2001 actzero, inc. All rights reserved.
import sys
sys.path.insert(1, "..")
from SOAPpy import *
from SOAPpy import Parser
# Uncomment to see outgoing HTTP headers and SOAP and incoming
#Config.debug = 1
if len(sys.argv) > 1 and sys.argv[1] == '-s':
server = SOAPProxy("https://localhost:9900")
else:
server = SOAPProxy("http://localhost:9900")
# BIG data:
big = repr('.' * (1<<18) )
# ...in an object
print "server.echo_ino(big):..",
tmp = server.echo_ino(big)
print "done"
# ...in an object in an object
print "server.prop.echo2(big)..",
tmp = server.prop.echo2(big)
print "done"
# ...with keyword arguments
print 'server.echo_wkw(third = big, first = "one", second = "two")..',
tmp = server.echo_wkw(third = big, first = "one", second = "two")
print "done"
# ...with a context object
print "server.echo_wc(big)..",
tmp = server.echo_wc(big)
print "done"
# ...with a header
hd = headerType(data = {"mystring": "Hello World"})
print "server._hd(hd).echo_wc(big)..",
tmp = server._hd(hd).echo_wc(big)
print "done"
server.quit()
| [
[
1,
0,
0.1,
0.02,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.12,
0.02,
0,
0.66,
0.0455,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.16,
0.02,
0,
0.66,
0.090... | [
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"from SOAPpy import Parser",
"if len(sys.argv) > 1 and sys.argv[1] == '-s':\n server = SOAPProxy(\"https://localhost:9900\")\nelse:\n server = SOAPProxy(\"http://localhost:9900\")",
" server = SOAPProxy(\"https://localhost:9900\"... |
#!/usr/bin/env python
#
# Copyright (c) 2001 actzero, inc. All rights reserved.
import sys
sys.path.insert(1, "..")
from SOAPpy import *
# Uncomment to see outgoing HTTP headers and SOAP and incoming
Config.dumpSOAPIn = 1
Config.dumpSOAPOut = 1
Config.debug = 1
# specify name of authorization function
Config.authMethod = "_authorize"
# Set this to 0 to test authorization
allowAll = 1
# ask for returned SOAP responses to be converted to basic python types
Config.simplify_objects = 1
# provide a mechanism to stop the server
run = 1
def quit():
global run
run=0;
if Config.SSLserver:
from M2Crypto import SSL
def _authorize(*args, **kw):
global allowAll, Config
if Config.debug:
print "Authorize (function) called! (result = %d)" % allowAll
print "Arguments: %s" % kw
if allowAll:
return 1
else:
return 0
# Simple echo
def echo(s):
global Config
# Test of context retrieval
ctx = Server.GetSOAPContext()
if Config.debug:
print "SOAP Context: ", ctx
return s + s
# An echo class
class echoBuilder2:
def echo2(self, val):
return val * 3
# A class that has an instance variable which is an echo class
class echoBuilder:
def __init__(self):
self.prop = echoBuilder2()
def echo_ino(self, val):
return val + val
def _authorize(self, *args, **kw):
global allowAll, Config
if Config.debug:
print "Authorize (method) called with arguments:"
print "*args=%s" % str(args)
print "**kw =%s" % str(kw)
print "Approved -> %d" % allowAll
if allowAll:
return 1
else:
return 0
# Echo with context
def echo_wc(s, _SOAPContext):
global Config
c = _SOAPContext
sep = '-' * 72
# The Context object has extra info about the call
if Config.debug:
print "-- XML", sep[7:]
# The original XML request
print c.xmldata
print "-- Header", sep[10:]
# The SOAP Header or None if not present
print c.header
if c.header:
print "-- Header.mystring", sep[19:]
# An element of the SOAP Header
print c.header.mystring
print "-- Body", sep[8:]
# The whole Body object
print c.body
print "-- Peer", sep[8:]
if not GSI:
# The socket object, useful for
print c.connection.getpeername()
else:
# The socket object, useful for
print c.connection.get_remote_address()
ctx = c.connection.get_security_context()
print ctx.inquire()[0].display()
print "-- SOAPAction", sep[14:]
# The SOAPaction HTTP header
print c.soapaction
print "-- HTTP headers", sep[16:]
# All the HTTP headers
print c.httpheaders
return s + s
# Echo with keyword arguments
def echo_wkw(**kw):
return kw['first'] + kw['second'] + kw['third']
# Simple echo
def echo_simple(*arg):
return arg
def echo_header(s, _SOAPContext):
global Config
c = _SOAPContext
return s, c.header
addr = ('localhost', 9900)
GSI = 0
SSL = 0
if len(sys.argv) > 1 and sys.argv[1] == '-s':
SSL = 1
if not Config.SSLserver:
raise RuntimeError, \
"this Python installation doesn't have OpenSSL and M2Crypto"
ssl_context = SSL.Context()
ssl_context.load_cert('validate/server.pem')
server = SOAPServer(addr, ssl_context = ssl_context)
prefix = 'https'
elif len(sys.argv) > 1 and sys.argv[1] == '-g':
GSI = 1
from SOAPpy.GSIServer import GSISOAPServer
server = GSISOAPServer(addr)
prefix = 'httpg'
else:
server = SOAPServer(addr)
prefix = 'http'
print "Server listening at: %s://%s:%d/" % (prefix, addr[0], addr[1])
# register the method
server.registerFunction(echo)
server.registerFunction(echo, path = "/pathtest")
server.registerFunction(_authorize)
server.registerFunction(_authorize, path = "/pathtest")
# Register a whole object
o = echoBuilder()
server.registerObject(o, path = "/pathtest")
server.registerObject(o)
# Register a function which gets called with the Context object
server.registerFunction(MethodSig(echo_wc, keywords = 0, context = 1),
path = "/pathtest")
server.registerFunction(MethodSig(echo_wc, keywords = 0, context = 1))
# Register a function that takes keywords
server.registerKWFunction(echo_wkw, path = "/pathtest")
server.registerKWFunction(echo_wkw)
server.registerFunction(echo_simple)
server.registerFunction(MethodSig(echo_header, keywords=0, context=1))
server.registerFunction(quit)
# Start the server
try:
while run:
server.handle_request()
except KeyboardInterrupt:
pass
| [
[
1,
0,
0.0254,
0.0051,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
8,
0,
0.0305,
0.0051,
0,
0.66,
0.0256,
368,
3,
2,
0,
0,
0,
0,
1
],
[
1,
0,
0.0406,
0.0051,
0,
0.... | [
"import sys",
"sys.path.insert(1, \"..\")",
"from SOAPpy import *",
"Config.dumpSOAPIn = 1",
"Config.dumpSOAPOut = 1",
"Config.debug = 1",
"Config.authMethod = \"_authorize\"",
"allowAll = 1",
"Config.simplify_objects = 1",
"run = 1",
"def quit():\n global run\n run=0;",
" run=0;",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.