code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Http tests
Unit tests for the apiclient.http.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import unittest
from apiclient.http import set_user_agent
from apiclient.http import HttpMockSequence
class TestUserAgent(unittest.TestCase):
def test_set_user_agent(self):
http = HttpMockSequence([
({'status': '200'}, 'echo_request_headers'),
])
http = set_user_agent(http, "my_app/5.5")
resp, content = http.request("http://example.com")
self.assertEqual(content['user-agent'], 'my_app/5.5')
def test_set_user_agent_nested(self):
http = HttpMockSequence([
({'status': '200'}, 'echo_request_headers'),
])
http = set_user_agent(http, "my_app/5.5")
http = set_user_agent(http, "my_library/0.1")
resp, content = http.request("http://example.com")
self.assertEqual(content['user-agent'], 'my_app/5.5 my_library/0.1')
if __name__ == '__main__':
unittest.main()
| [
[
8,
0,
0.3558,
0.0769,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.4231,
0.0192,
0,
0.66,
0.1667,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.4615,
0.0192,
0,
0.66,... | [
"\"\"\"Http tests\n\nUnit tests for the apiclient.http.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import unittest",
"from apiclient.http import set_user_agent",
"from apiclient.http import HttpMockSequence",
"class TestUserAgent(unittest.TestCase):\n\n def test_set_user_agent(self... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
"""Thrown If validator constraint is not satisfied."""
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use flags.Register...
methods instead.
"""
# Used to assign each validator an unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SimpleValidator and
DictionaryValidator for a detailed description.
message: string, error message to be shown to the user
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to assert validators in the order they were registered (CL/18694236)
self.insertion_index = Validator.validators_count
def Verify(self, flag_values):
"""Verify that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: flags.FlagValues, containing all flags
Raises:
Error: if constraint is not satisfied.
"""
param = self._GetInputToCheckerFunction(flag_values)
if not self.checker(param):
raise Error(self.message)
def GetFlagsNames(self):
"""Return the names of the flags checked by this validator.
Returns:
[string], names of the flags
"""
raise NotImplementedError('This method should be overloaded')
def PrintFlagsWithValues(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues, containing all flags.
Returns:
Return type depends on the specific validator.
"""
raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
"""Validator behind RegisterValidator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception."""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(SimpleValidator, self).__init__(checker, message)
self.flag_name = flag_name
def GetFlagsNames(self):
return [self.flag_name]
def PrintFlagsWithValues(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues
Returns:
value of the corresponding flag.
"""
return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
"""Validator behind RegisterDictionaryValidator method.
Validates that flag values pass their common checker function. The checker
function takes flag values and returns True (if values look fine) or,
if values are not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_names, checker, message):
"""Constructor.
Args:
flag_names: [string], containing names of the flags used by checker.
checker: function to verify the validator.
input - dictionary, with keys() being flag_names, and value for each
key being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(DictionaryValidator, self).__init__(checker, message)
self.flag_names = flag_names
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues
Returns:
dictionary, with keys() being self.lag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
"""
return dict([key, flag_values[key].value] for key in self.flag_names)
def PrintFlagsWithValues(self, flag_values):
prefix = 'flags '
flags_with_values = []
for key in self.flag_names:
flags_with_values.append('%s=%s' % (key, flag_values[key].value))
return prefix + ', '.join(flags_with_values)
def GetFlagsNames(self):
return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
# Copyright 2010 Google Inc. All Rights Reserved.
"""Classes to encapsulate a single HTTP request.
The classes implement a command pattern, with every
object supporting an execute() method that does the
actuall HTTP request.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'HttpRequest', 'RequestMockBuilder', 'HttpMock'
'set_user_agent', 'tunnel_patch'
]
import httplib2
import os
from model import JsonModel
from errors import HttpError
from anyjson import simplejson
class HttpRequest(object):
"""Encapsulates a single HTTP request.
"""
def __init__(self, http, postproc, uri,
method='GET',
body=None,
headers=None,
methodId=None):
"""Constructor for an HttpRequest.
Args:
http: httplib2.Http, the transport object to use to make a request
postproc: callable, called on the HTTP response and content to transform
it into a data object before returning, or raising an exception
on an error.
uri: string, the absolute URI to send the request to
method: string, the HTTP method to use
body: string, the request body of the HTTP request
headers: dict, the HTTP request headers
methodId: string, a unique identifier for the API method being called.
"""
self.uri = uri
self.method = method
self.body = body
self.headers = headers or {}
self.http = http
self.postproc = postproc
def execute(self, http=None):
"""Execute the request.
Args:
http: httplib2.Http, an http object to be used in place of the
one the HttpRequest request object was constructed with.
Returns:
A deserialized object model of the response body as determined
by the postproc.
Raises:
apiclient.errors.HttpError if the response was not a 2xx.
httplib2.Error if a transport error has occured.
"""
if http is None:
http = self.http
resp, content = http.request(self.uri, self.method,
body=self.body,
headers=self.headers)
if resp.status >= 300:
raise HttpError(resp, content, self.uri)
return self.postproc(resp, content)
class HttpRequestMock(object):
"""Mock of HttpRequest.
Do not construct directly, instead use RequestMockBuilder.
"""
def __init__(self, resp, content, postproc):
"""Constructor for HttpRequestMock
Args:
resp: httplib2.Response, the response to emulate coming from the request
content: string, the response body
postproc: callable, the post processing function usually supplied by
the model class. See model.JsonModel.response() as an example.
"""
self.resp = resp
self.content = content
self.postproc = postproc
if resp is None:
self.resp = httplib2.Response({'status': 200, 'reason': 'OK'})
if 'reason' in self.resp:
self.resp.reason = self.resp['reason']
def execute(self, http=None):
"""Execute the request.
Same behavior as HttpRequest.execute(), but the response is
mocked and not really from an HTTP request/response.
"""
return self.postproc(self.resp, self.content)
class RequestMockBuilder(object):
"""A simple mock of HttpRequest
Pass in a dictionary to the constructor that maps request methodIds to
tuples of (httplib2.Response, content) that should be returned when that
method is called. None may also be passed in for the httplib2.Response, in
which case a 200 OK response will be generated.
Example:
response = '{"data": {"id": "tag:google.c...'
requestBuilder = RequestMockBuilder(
{
'chili.activities.get': (None, response),
}
)
apiclient.discovery.build("buzz", "v1", requestBuilder=requestBuilder)
Methods that you do not supply a response for will return a
200 OK with an empty string as the response content. The methodId
is taken from the rpcName in the discovery document.
For more details see the project wiki.
"""
def __init__(self, responses):
"""Constructor for RequestMockBuilder
The constructed object should be a callable object
that can replace the class HttpResponse.
responses - A dictionary that maps methodIds into tuples
of (httplib2.Response, content). The methodId
comes from the 'rpcName' field in the discovery
document.
"""
self.responses = responses
def __call__(self, http, postproc, uri, method='GET', body=None,
headers=None, methodId=None):
"""Implements the callable interface that discovery.build() expects
of requestBuilder, which is to build an object compatible with
HttpRequest.execute(). See that method for the description of the
parameters and the expected response.
"""
if methodId in self.responses:
resp, content = self.responses[methodId]
return HttpRequestMock(resp, content, postproc)
else:
model = JsonModel(False)
return HttpRequestMock(None, '{}', model.response)
class HttpMock(object):
"""Mock of httplib2.Http"""
def __init__(self, filename, headers=None):
"""
Args:
filename: string, absolute filename to read response from
headers: dict, header to return with response
"""
if headers is None:
headers = {'status': '200 OK'}
f = file(filename, 'r')
self.data = f.read()
f.close()
self.headers = headers
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
return httplib2.Response(self.headers), self.data
class HttpMockSequence(object):
"""Mock of httplib2.Http
Mocks a sequence of calls to request returning different responses for each
call. Create an instance initialized with the desired response headers
and content and then use as if an httplib2.Http instance.
http = HttpMockSequence([
({'status': '401'}, ''),
({'status': '200'}, '{"access_token":"1/3w","expires_in":3600}'),
({'status': '200'}, 'echo_request_headers'),
])
resp, content = http.request("http://examples.com")
There are special values you can pass in for content to trigger
behavours that are helpful in testing.
'echo_request_headers' means return the request headers in the response body
'echo_request_headers_as_json' means return the request headers in
the response body
'echo_request_body' means return the request body in the response body
"""
def __init__(self, iterable):
"""
Args:
iterable: iterable, a sequence of pairs of (headers, body)
"""
self._iterable = iterable
def request(self, uri,
method='GET',
body=None,
headers=None,
redirections=1,
connection_type=None):
resp, content = self._iterable.pop(0)
if content == 'echo_request_headers':
content = headers
elif content == 'echo_request_headers_as_json':
content = simplejson.dumps(headers)
elif content == 'echo_request_body':
content = body
return httplib2.Response(resp), content
def set_user_agent(http, user_agent):
"""Set the user-agent on every request.
Args:
http - An instance of httplib2.Http
or something that acts like it.
user_agent: string, the value for the user-agent header.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = set_user_agent(h, "my-app-name/6.0")
Most of the time the user-agent will be set doing auth, this is for the rare
cases where you are accessing an unauthenticated endpoint.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if 'user-agent' in headers:
headers['user-agent'] = user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
def tunnel_patch(http):
"""Tunnel PATCH requests over POST.
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = tunnel_patch(h, "my-app-name/6.0")
Useful if you are running on a platform that doesn't support PATCH.
Apply this last if you are using OAuth 1.0, as changing the method
will result in a different signature.
"""
request_orig = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the user-agent."""
if headers is None:
headers = {}
if method == 'PATCH':
if 'oauth_token' in headers.get('authorization', ''):
logging.warning(
'OAuth 1.0 request made with Credentials after tunnel_patch.')
headers['x-http-method-override'] = "PATCH"
method = 'POST'
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
return resp, content
http.request = new_request
return http
| [
[
8,
0,
0.0176,
0.0192,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0321,
0.0032,
0,
0.66,
0.0714,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0401,
0.0128,
0,
0.66... | [
"\"\"\"Classes to encapsulate a single HTTP request.\n\nThe classes implement a command pattern, with every\nobject supporting an execute() method that does the\nactuall HTTP request.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"__all__ = [\n 'HttpRequest', 'RequestMockBuilder', 'HttpMoc... |
# Copyright 2010 Google Inc. All Rights Reserved.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
import httplib2
import logging
import oauth2 as oauth
import urllib
import urlparse
from anyjson import simplejson
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
class Error(Exception):
"""Base error for this module."""
pass
class RequestError(Error):
"""Error occurred during request."""
pass
class MissingParameter(Error):
pass
class CredentialsInvalidError(Error):
pass
def _abstract():
raise NotImplementedError('You need to override this function')
def _oauth_uri(name, discovery, params):
"""Look up the OAuth URI from the discovery
document and add query parameters based on
params.
name - The name of the OAuth URI to lookup, one
of 'request', 'access', or 'authorize'.
discovery - Portion of discovery document the describes
the OAuth endpoints.
params - Dictionary that is used to form the query parameters
for the specified URI.
"""
if name not in ['request', 'access', 'authorize']:
raise KeyError(name)
keys = discovery[name]['parameters'].keys()
query = {}
for key in keys:
if key in params:
query[key] = params[key]
return discovery[name]['url'] + '?' + urllib.urlencode(query)
class Credentials(object):
"""Base class for all Credentials objects.
Subclasses must define an authorize() method
that applies the credentials to an HTTP transport.
"""
def authorize(self, http):
"""Take an httplib2.Http instance (or equivalent) and
authorizes it for the set of credentials, usually by
replacing http.request() with a method that adds in
the appropriate headers and then delegates to the original
Http.request() method.
"""
_abstract()
class Flow(object):
"""Base class for all Flow objects."""
pass
class Storage(object):
"""Base class for all Storage objects.
Store and retrieve a single credential.
"""
def get(self):
"""Retrieve credential.
Returns:
apiclient.oauth.Credentials
"""
_abstract()
def put(self, credentials):
"""Write a credential.
Args:
credentials: Credentials, the credentials to store.
"""
_abstract()
class OAuthCredentials(Credentials):
"""Credentials object for OAuth 1.0a
"""
def __init__(self, consumer, token, user_agent):
"""
consumer - An instance of oauth.Consumer.
token - An instance of oauth.Token constructed with
the access token and secret.
user_agent - The HTTP User-Agent to provide for this application.
"""
self.consumer = consumer
self.token = token
self.user_agent = user_agent
self.store = None
# True if the credentials have been revoked
self._invalid = False
@property
def invalid(self):
"""True if the credentials are invalid, such as being revoked."""
return getattr(self, "_invalid", False)
def set_store(self, store):
"""Set the storage for the credential.
Args:
store: callable, a callable that when passed a Credential
will store the credential back to where it came from.
This is needed to store the latest access_token if it
has been revoked.
"""
self.store = store
def __getstate__(self):
"""Trim the state down to something that can be pickled."""
d = copy.copy(self.__dict__)
del d['store']
return d
def __setstate__(self, state):
"""Reconstitute the state of the object from being pickled."""
self.__dict__.update(state)
self.store = None
def authorize(self, http):
"""
Args:
http - An instance of httplib2.Http
or something that acts like it.
Returns:
A modified instance of http that was passed in.
Example:
h = httplib2.Http()
h = credentials.authorize(h)
You can't create a new OAuth
subclass of httplib2.Authenication because
it never gets passed the absolute URI, which is
needed for signing. So instead we have to overload
'request' with a closure that adds in the
Authorization header and then calls the original version
of 'request()'.
"""
request_orig = http.request
signer = oauth.SignatureMethod_HMAC_SHA1()
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Modify the request headers to add the appropriate
Authorization header."""
response_code = 302
http.follow_redirects = False
while response_code in [301, 302]:
req = oauth.Request.from_consumer_and_token(
self.consumer, self.token, http_method=method, http_url=uri)
req.sign_request(signer, self.consumer, self.token)
if headers is None:
headers = {}
headers.update(req.to_header())
if 'user-agent' in headers:
headers['user-agent'] = self.user_agent + ' ' + headers['user-agent']
else:
headers['user-agent'] = self.user_agent
resp, content = request_orig(uri, method, body, headers,
redirections, connection_type)
response_code = resp.status
if response_code in [301, 302]:
uri = resp['location']
# Update the stored credential if it becomes invalid.
if response_code == 401:
logging.info('Access token no longer valid: %s' % content)
self._invalid = True
if self.store is not None:
self.store(self)
raise CredentialsInvalidError("Credentials are no longer valid.")
return resp, content
http.request = new_request
return http
class FlowThreeLegged(Flow):
"""Does the Three Legged Dance for OAuth 1.0a.
"""
def __init__(self, discovery, consumer_key, consumer_secret, user_agent,
**kwargs):
"""
discovery - Section of the API discovery document that describes
the OAuth endpoints.
consumer_key - OAuth consumer key
consumer_secret - OAuth consumer secret
user_agent - The HTTP User-Agent that identifies the application.
**kwargs - The keyword arguments are all optional and required
parameters for the OAuth calls.
"""
self.discovery = discovery
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.user_agent = user_agent
self.params = kwargs
self.request_token = {}
required = {}
for uriinfo in discovery.itervalues():
for name, value in uriinfo['parameters'].iteritems():
if value['required'] and not name.startswith('oauth_'):
required[name] = 1
for key in required.iterkeys():
if key not in self.params:
raise MissingParameter('Required parameter %s not supplied' % key)
def step1_get_authorize_url(self, oauth_callback='oob'):
"""Returns a URI to redirect to the provider.
oauth_callback - Either the string 'oob' for a non-web-based application,
or a URI that handles the callback from the authorization
server.
If oauth_callback is 'oob' then pass in the
generated verification code to step2_exchange,
otherwise pass in the query parameters received
at the callback uri to step2_exchange.
"""
consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
client = oauth.Client(consumer)
headers = {
'user-agent': self.user_agent,
'content-type': 'application/x-www-form-urlencoded'
}
body = urllib.urlencode({'oauth_callback': oauth_callback})
uri = _oauth_uri('request', self.discovery, self.params)
resp, content = client.request(uri, 'POST', headers=headers,
body=body)
if resp['status'] != '200':
logging.error('Failed to retrieve temporary authorization: %s', content)
raise RequestError('Invalid response %s.' % resp['status'])
self.request_token = dict(parse_qsl(content))
auth_params = copy.copy(self.params)
auth_params['oauth_token'] = self.request_token['oauth_token']
return _oauth_uri('authorize', self.discovery, auth_params)
def step2_exchange(self, verifier):
"""Exhanges an authorized request token
for OAuthCredentials.
Args:
verifier: string, dict - either the verifier token, or a dictionary
of the query parameters to the callback, which contains
the oauth_verifier.
Returns:
The Credentials object.
"""
if not (isinstance(verifier, str) or isinstance(verifier, unicode)):
verifier = verifier['oauth_verifier']
token = oauth.Token(
self.request_token['oauth_token'],
self.request_token['oauth_token_secret'])
token.set_verifier(verifier)
consumer = oauth.Consumer(self.consumer_key, self.consumer_secret)
client = oauth.Client(consumer, token)
headers = {
'user-agent': self.user_agent,
'content-type': 'application/x-www-form-urlencoded'
}
uri = _oauth_uri('access', self.discovery, self.params)
resp, content = client.request(uri, 'POST', headers=headers)
if resp['status'] != '200':
logging.error('Failed to retrieve access token: %s', content)
raise RequestError('Invalid response %s.' % resp['status'])
oauth_params = dict(parse_qsl(content))
token = oauth.Token(
oauth_params['oauth_token'],
oauth_params['oauth_token_secret'])
return OAuthCredentials(consumer, token, self.user_agent)
| [
[
8,
0,
0.0138,
0.0122,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0245,
0.0031,
0,
0.66,
0.05,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0336,
0.0031,
0,
0.66,
... | [
"\"\"\"Utilities for OAuth.\n\nUtilities for making it easier to work with OAuth.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import copy",
"import httplib2",
"import logging",
"import oauth2 as oauth",
"import urllib",
"import urlparse",
"from anyjson import simplejson",
"tr... |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from anyjson import simplejson
from errors import HttpError
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('dump_request_response', False,
'Dump all http server requests and responses. '
'Must use apiclient.model.LoggingJsonModel as '
'the model.'
)
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
"""Model base class.
All Model classes should implement this interface.
The Model serializes and de-serializes between a wire
format such as JSON and a Python object representation.
"""
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with a deserialized body.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized in the desired wire format.
"""
_abstract()
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
_abstract()
class JsonModel(Model):
"""Model class for JSON.
Serializes and de-serializes between JSON and the Python
object representation of HTTP request and response bodies.
"""
def __init__(self, data_wrapper=False):
"""Construct a JsonModel
Args:
data_wrapper: boolean, wrap requests and responses in a data wrapper
"""
self._data_wrapper = data_wrapper
def request(self, headers, path_params, query_params, body_value):
"""Updates outgoing requests with JSON bodies.
Args:
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query_params: dict, parameters that appear in the query
body_value: object, the request body as a Python object, which must be
serializable by simplejson.
Returns:
A tuple of (headers, path_params, query, body)
headers: dict, request headers
path_params: dict, parameters that appear in the request path
query: string, query part of the request URI
body: string, the body serialized as JSON
"""
query = self._build_query(query_params)
headers['accept'] = 'application/json'
headers['accept-encoding'] = 'gzip, deflate'
if 'user-agent' in headers:
headers['user-agent'] += ' '
else:
headers['user-agent'] = ''
headers['user-agent'] += 'google-api-python-client/1.0'
if (isinstance(body_value, dict) and 'data' not in body_value and
self._data_wrapper):
body_value = {'data': body_value}
if body_value is not None:
headers['content-type'] = 'application/json'
body_value = simplejson.dumps(body_value)
return (headers, path_params, query, body_value)
def _build_query(self, params):
"""Builds a query string.
Args:
params: dict, the query parameters
Returns:
The query parameters properly encoded into an HTTP URI query string.
"""
params.update({'alt': 'json'})
astuples = []
for key, value in params.iteritems():
if type(value) == type([]):
for x in value:
x = x.encode('utf-8')
astuples.append((key, x))
else:
if getattr(value, 'encode', False) and callable(value.encode):
value = value.encode('utf-8')
astuples.append((key, value))
return '?' + urllib.urlencode(astuples)
def response(self, resp, content):
"""Convert the response wire format into a Python object.
Args:
resp: httplib2.Response, the HTTP response headers and status
content: string, the body of the HTTP response
Returns:
The body de-serialized as a Python object.
Raises:
apiclient.errors.HttpError if a non 2xx response is received.
"""
# Error handling is TBD, for example, do we retry
# for some operation/error combinations?
if resp.status < 300:
if resp.status == 204:
# A 204: No Content response should be treated differently
# to all the other success states
return simplejson.loads('{}')
body = simplejson.loads(content)
if isinstance(body, dict) and 'data' in body:
body = body['data']
return body
else:
logging.debug('Content from bad request was: %s' % content)
raise HttpError(resp, content)
class LoggingJsonModel(JsonModel):
  """A printable JsonModel class that supports logging response info."""

  def response(self, resp, content):
    """An overloaded response method that will output debug info if requested.

    Args:
      resp: An httplib2.Response object.
      content: A string representing the response body.

    Returns:
      The body de-serialized as a Python object.
    """
    # Only log when the --dump_request_response flag is set.
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')
    # Delegate the actual deserialization to JsonModel.
    return super(LoggingJsonModel, self).response(
        resp, content)

  def request(self, headers, path_params, query_params, body_value):
    """An overloaded request method that will output debug info if requested.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
        serializable by simplejson.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized as JSON
    """
    # Build the request first so the logged values match what is sent.
    (headers, path_params, query, body) = super(
        LoggingJsonModel, self).request(
            headers, path_params, query_params, body_value)
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')
    return (headers, path_params, query, body)
| [
[
8,
0,
0.0342,
0.0299,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0556,
0.0043,
0,
0.66,
0.0833,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0641,
0.0043,
0,
0.66,... | [
"\"\"\"Model objects for requests and responses.\n\nEach API may support one or more serializations, such\nas JSON, Atom, etc. The model classes are responsible\nfor converting between the wire format and the Python\nobject representation.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import... |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from anyjson import simplejson
from errors import HttpError
FLAGS = gflags.FLAGS
gflags.DEFINE_boolean('dump_request_response', False,
'Dump all http server requests and responses. '
'Must use apiclient.model.LoggingJsonModel as '
'the model.'
)
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
  """Model base class.

  All Model classes should implement this interface.
  The Model serializes and de-serializes between a wire
  format such as JSON and a Python object representation.
  """

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a deserialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
        serializable.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized in the desired wire format.
    """
    # Subclasses must override; calling this on the base class is an error.
    _abstract()

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    # Subclasses must override; calling this on the base class is an error.
    _abstract()
class JsonModel(Model):
  """Model class for JSON.

  Serializes and de-serializes between JSON and the Python
  object representation of HTTP request and response bodies.
  """

  def __init__(self, data_wrapper=False):
    """Construct a JsonModel.

    Args:
      data_wrapper: boolean, wrap requests and responses in a data wrapper
    """
    self._data_wrapper = data_wrapper

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with JSON bodies.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
        serializable by simplejson.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized as JSON
    """
    query = self._build_query(query_params)
    headers['accept'] = 'application/json'
    headers['accept-encoding'] = 'gzip, deflate'
    # Append our identifier to any user agent the caller already set.
    if 'user-agent' in headers:
      headers['user-agent'] += ' '
    else:
      headers['user-agent'] = ''
    headers['user-agent'] += 'google-api-python-client/1.0'

    # Wrap the body in a 'data' envelope when the API expects one.
    if (isinstance(body_value, dict) and 'data' not in body_value and
        self._data_wrapper):
      body_value = {'data': body_value}
    if body_value is not None:
      headers['content-type'] = 'application/json'
      body_value = simplejson.dumps(body_value)
    return (headers, path_params, query, body_value)

  def _build_query(self, params):
    """Builds a query string.

    Args:
      params: dict, the query parameters

    Returns:
      The query parameters properly encoded into an HTTP URI query string,
      including the leading '?'.
    """
    # Always request a JSON response from the server.
    params.update({'alt': 'json'})
    astuples = []
    for key, value in params.iteritems():
      # Repeated parameters arrive as lists; emit one key=value pair per item.
      # isinstance (rather than type() comparison) also accepts list subclasses.
      if isinstance(value, list):
        for x in value:
          x = x.encode('utf-8')
          astuples.append((key, x))
      else:
        # Encode anything string-like; leave ints/bools untouched.
        if getattr(value, 'encode', False) and callable(value.encode):
          value = value.encode('utf-8')
        astuples.append((key, value))
    return '?' + urllib.urlencode(astuples)

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    # Error handling is TBD, for example, do we retry
    # for some operation/error combinations?
    if resp.status < 300:
      if resp.status == 204:
        # A 204: No Content response should be treated differently
        # to all the other success states
        return simplejson.loads('{}')
      body = simplejson.loads(content)
      # Unwrap the 'data' envelope some Google APIs place around responses.
      if isinstance(body, dict) and 'data' in body:
        body = body['data']
      return body
    else:
      logging.debug('Content from bad request was: %s' % content)
      raise HttpError(resp, content)
class LoggingJsonModel(JsonModel):
  """A printable JsonModel class that supports logging response info."""

  def response(self, resp, content):
    """An overloaded response method that will output debug info if requested.

    Args:
      resp: An httplib2.Response object.
      content: A string representing the response body.

    Returns:
      The body de-serialized as a Python object.
    """
    # Only log when the --dump_request_response flag is set.
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')
    # Delegate the actual deserialization to JsonModel.
    return super(LoggingJsonModel, self).response(
        resp, content)

  def request(self, headers, path_params, query_params, body_value):
    """An overloaded request method that will output debug info if requested.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
        serializable by simplejson.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized as JSON
    """
    # Build the request first so the logged values match what is sent.
    (headers, path_params, query, body) = super(
        LoggingJsonModel, self).request(
            headers, path_params, query_params, body_value)
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')
    return (headers, path_params, query, body)
| [
[
8,
0,
0.0342,
0.0299,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0556,
0.0043,
0,
0.66,
0.0833,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0641,
0.0043,
0,
0.66,... | [
"\"\"\"Model objects for requests and responses.\n\nEach API may support one or more serializations, such\nas JSON, Atom, etc. The model classes are responsible\nfor converting between the wire format and the Python\nobject representation.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import... |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
class Error(Exception):
  """Base error for this module."""
class HttpError(Error):
  """HTTP data was invalid or unexpected."""

  def __init__(self, resp, content, uri=None):
    self.resp = resp
    self.content = content
    self.uri = uri

  def _get_reason(self):
    """Derive a human-readable reason from the response body or headers.
    """
    content_type = self.resp.get('content-type', '')
    if not content_type.startswith('application/json'):
      return self.resp.reason
    try:
      data = simplejson.loads(self.content)
      return data['error']['message']
    except (ValueError, KeyError):
      return self.content

  def __repr__(self):
    reason = self._get_reason()
    if self.uri:
      return '<HttpError %s when requesting %s returned "%s">' % (
          self.resp.status, self.uri, reason)
    return '<HttpError %s "%s">' % (self.resp.status, reason)

  __str__ = __repr__
class InvalidJsonError(Error):
  """The JSON returned could not be parsed."""
class UnknownLinkType(Error):
  """Link type unknown or unexpected."""
| [
[
8,
0,
0.1167,
0.0833,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1833,
0.0167,
0,
0.66,
0.1667,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2333,
0.0167,
0,
0.66,... | [
"\"\"\"Errors for the library.\n\nAll exceptions defined by the library\nshould be defined in this file.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"from anyjson import simplejson",
"class Error(Exception):\n \"\"\"Base error for this module.\"\"\"\n pass",
" \"\"\"Base error for th... |
# Copyright 2010 Google Inc. All Rights Reserved.
"""Utilities for OAuth.
Utilities for making it easier to work with OAuth 1.0 credentials.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
import threading
from apiclient.oauth import Storage as BaseStorage
class Storage(BaseStorage):
  """Store and retrieve a single credential to and from a file."""

  def __init__(self, filename):
    self._filename = filename
    self._lock = threading.Lock()

  def get(self):
    """Retrieve Credential from file.

    Returns:
      apiclient.oauth.Credentials, or None if the file is missing or
      cannot be read/unpickled.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'r')
      try:
        credentials = pickle.loads(f.read())
      finally:
        # Close the file even if unpickling fails.
        f.close()
      # Wire the credential back to this store so refreshes are persisted.
      credentials.set_store(self.put)
    except Exception:
      # Best effort: treat any read/unpickle failure as "no credentials".
      # (Was a bare except:, which also swallowed KeyboardInterrupt/SystemExit.)
      credentials = None
    finally:
      # Always release the lock, even on unexpected exceptions.
      self._lock.release()
    return credentials

  def put(self, credentials):
    """Write a pickled Credentials to file.

    Args:
      credentials: Credentials, the credentials to store.
    """
    self._lock.acquire()
    try:
      f = open(self._filename, 'w')
      try:
        f.write(pickle.dumps(credentials))
      finally:
        f.close()
    finally:
      # The original leaked the lock if open()/write() raised.
      self._lock.release()
| [
[
8,
0,
0.0882,
0.0784,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1569,
0.0196,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1961,
0.0196,
0,
0.66,
... | [
"\"\"\"Utilities for OAuth.\n\nUtilities for making it easier to work with OAuth 1.0 credentials.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import pickle",
"import threading",
"from apiclient.oauth import Storage as BaseStorage",
"class Storage(BaseStorage):\n \"\"\"Store and retr... |
import apiclient
import base64
import pickle
from django.db import models
class OAuthCredentialsField(models.Field):
  # Django model field that persists an apiclient.oauth.Credentials as
  # base64-encoded pickle data in a VARCHAR column.

  # SubfieldBase makes Django run to_python() on attribute assignment.
  __metaclass__ = models.SubfieldBase

  def db_type(self):
    return 'VARCHAR'

  def to_python(self, value):
    # Django may pass None, an already-deserialized Credentials, or the
    # base64 text loaded from the database.
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.Credentials):
      return value
    # NOTE(review): pickle.loads on database content is only safe if the
    # database is trusted.
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    return base64.b64encode(pickle.dumps(value))
class FlowThreeLeggedField(models.Field):
  # Django model field that persists an apiclient.oauth.FlowThreeLegged as
  # base64-encoded pickle data in a VARCHAR column.

  # SubfieldBase makes Django run to_python() on attribute assignment.
  __metaclass__ = models.SubfieldBase

  def db_type(self):
    return 'VARCHAR'

  def to_python(self, value):
    # Django may pass None, an already-deserialized flow, or the base64
    # text loaded from the database.
    # (A leftover debug `print` statement was removed here; it wrote to
    # stdout on every attribute access.)
    if value is None:
      return None
    if isinstance(value, apiclient.oauth.FlowThreeLegged):
      return value
    # NOTE(review): pickle.loads on database content is only safe if the
    # database is trusted.
    return pickle.loads(base64.b64decode(value))

  def get_db_prep_value(self, value):
    return base64.b64encode(pickle.dumps(value))
| [
[
1,
0,
0.0238,
0.0238,
0,
0.66,
0,
629,
0,
1,
0,
0,
629,
0,
0
],
[
1,
0,
0.0476,
0.0238,
0,
0.66,
0.2,
177,
0,
1,
0,
0,
177,
0,
0
],
[
1,
0,
0.0714,
0.0238,
0,
0.6... | [
"import apiclient",
"import base64",
"import pickle",
"from django.db import models",
"class OAuthCredentialsField(models.Field):\n\n __metaclass__ = models.SubfieldBase\n\n def db_type(self):\n return 'VARCHAR'\n\n def to_python(self, value):",
" __metaclass__ = models.SubfieldBase",
" def db_t... |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google App Engine
Utilities for making it easier to use the
Google API Client for Python on Google App Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pickle
from google.appengine.ext import db
from apiclient.oauth import OAuthCredentials
from apiclient.oauth import FlowThreeLegged
class FlowThreeLeggedProperty(db.Property):
  """Utility property that allows easy
  storage and retrieval of an
  apiclient.oauth.FlowThreeLegged"""

  # Tell what the user type is.
  data_type = FlowThreeLegged

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    flow = super(FlowThreeLeggedProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(flow))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    if value is not None and not isinstance(value, FlowThreeLegged):
      # Bug fix: BadValueError lives on the db module; the bare name raised
      # NameError here instead of the intended validation error.
      raise db.BadValueError('Property %s must be convertible '
                             'to a FlowThreeLegged instance (%s)' %
                             (self.name, value))
    return super(FlowThreeLeggedProperty, self).validate(value)

  def empty(self, value):
    return not value
class OAuthCredentialsProperty(db.Property):
  """Utility property that allows easy
  storage and retrieval of
  apiclient.oath.OAuthCredentials
  """

  # Tell what the user type is.
  data_type = OAuthCredentials

  # For writing to datastore.
  def get_value_for_datastore(self, model_instance):
    cred = super(OAuthCredentialsProperty,
                 self).get_value_for_datastore(model_instance)
    return db.Blob(pickle.dumps(cred))

  # For reading from datastore.
  def make_value_from_datastore(self, value):
    if value is None:
      return None
    return pickle.loads(value)

  def validate(self, value):
    if value is not None and not isinstance(value, OAuthCredentials):
      # Bug fix: BadValueError lives on the db module; the bare name raised
      # NameError here instead of the intended validation error.
      raise db.BadValueError('Property %s must be convertible '
                             'to an OAuthCredentials instance (%s)' %
                             (self.name, value))
    return super(OAuthCredentialsProperty, self).validate(value)

  def empty(self, value):
    return not value
class StorageByKeyName(object):
  """Store and retrieve a single credential to and from
  the App Engine datastore.

  This Storage helper presumes the Credentials
  have been stored as a CredentialsProperty
  on a datastore model class, and that entities
  are stored by key_name.
  """

  def __init__(self, model, key_name, property_name):
    """Constructor for Storage.

    Args:
      model: db.Model, model class
      key_name: string, key name for the entity that has the credentials
      property_name: string, name of the property that is a CredentialsProperty
    """
    self.model = model
    self.key_name = key_name
    self.property_name = property_name

  def get(self):
    """Retrieve Credential from datastore.

    Returns:
      Credentials
    """
    # get_or_insert creates an empty entity (credential None) on first use.
    entity = self.model.get_or_insert(self.key_name)
    credential = getattr(entity, self.property_name)
    if credential and hasattr(credential, 'set_store'):
      # Wire the credential back to this store so refreshes are persisted.
      credential.set_store(self.put)
    return credential

  def put(self, credentials):
    """Write a Credentials to the datastore.

    Args:
      credentials: Credentials, the credentials to store.
    """
    entity = self.model.get_or_insert(self.key_name)
    setattr(entity, self.property_name, credentials)
    entity.put()
| [
[
8,
0,
0.1259,
0.037,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1556,
0.0074,
0,
0.66,
0.125,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1704,
0.0074,
0,
0.66,
... | [
"\"\"\"Utilities for Google App Engine\n\nUtilities for making it easier to use the\nGoogle API Client for Python on Google App Engine.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import pickle",
"from google.appengine.ext import db",
"from apiclient.oauth import OAuthCredentials",
"... |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
class Error(Exception):
  """Base error for this module."""
class HttpError(Error):
  """HTTP data was invalid or unexpected."""

  def __init__(self, resp, content, uri=None):
    self.resp = resp
    self.content = content
    self.uri = uri

  def _get_reason(self):
    """Derive a human-readable reason from the response body or headers.
    """
    content_type = self.resp.get('content-type', '')
    if not content_type.startswith('application/json'):
      return self.resp.reason
    try:
      data = simplejson.loads(self.content)
      return data['error']['message']
    except (ValueError, KeyError):
      return self.content

  def __repr__(self):
    reason = self._get_reason()
    if self.uri:
      return '<HttpError %s when requesting %s returned "%s">' % (
          self.resp.status, self.uri, reason)
    return '<HttpError %s "%s">' % (self.resp.status, reason)

  __str__ = __repr__
class InvalidJsonError(Error):
  """The JSON returned could not be parsed."""
class UnknownLinkType(Error):
  """Link type unknown or unexpected."""
| [
[
8,
0,
0.1167,
0.0833,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1833,
0.0167,
0,
0.66,
0.1667,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2333,
0.0167,
0,
0.66,... | [
"\"\"\"Errors for the library.\n\nAll exceptions defined by the library\nshould be defined in this file.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"from anyjson import simplejson",
"class Error(Exception):\n \"\"\"Base error for this module.\"\"\"\n pass",
" \"\"\"Base error for th... |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module to import a JSON module
Hides all the messy details of exactly where
we get a simplejson module from.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
try: # pragma: no cover
import simplejson
except ImportError: # pragma: no cover
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
| [
[
8,
0,
0.5312,
0.1562,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.6562,
0.0312,
0,
0.66,
0.5,
777,
1,
0,
0,
0,
0,
3,
0
],
[
7,
0,
0.875,
0.2812,
0,
0.66,
... | [
"\"\"\"Utility module to import a JSON module\n\nHides all the messy details of exactly where\nwe get a simplejson module from.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"try: # pragma: no cover\n import simplejson\nexcept ImportError: # pragma: no cover\n try:\n # Try to import from... |
# Early, and incomplete implementation of -04.
#
import re
import urllib
RESERVED = ":/?#[]@!$&'()*+,;="
OPERATOR = "+./;?|!@"
EXPLODE = "*+"
MODIFIER = ":^"
TEMPLATE = re.compile(r"{(?P<operator>[\+\./;\?|!@])?(?P<varlist>[^}]+)}", re.UNICODE)
VAR = re.compile(r"^(?P<varname>[^=\+\*:\^]+)((?P<explode>[\+\*])|(?P<partial>[:\^]-?[0-9]+))?(=(?P<default>.*))?$", re.UNICODE)
def _tostring(varname, value, explode, operator, safe=""):
  """Render a template variable using the default (simple) expansion.

  Args:
    varname: string, the variable name from the template.
    value: string, list or dict, the value bound to the variable.
    explode: string, the explode modifier ('+' or '*') or None.
    operator: string, the template operator (unused here).
    safe: string, characters urllib.quote should leave unescaped.

  Returns:
    string, the expanded value for this variable.
  """
  # isinstance (rather than type() comparison) also accepts subclasses.
  if isinstance(value, list):
    if explode == "+":
      return ",".join([varname + "." + urllib.quote(x, safe) for x in value])
    else:
      return ",".join([urllib.quote(x, safe) for x in value])
  if isinstance(value, dict):
    # Sort keys for deterministic output.
    keys = sorted(value.keys())
    if explode == "+":
      return ",".join([varname + "." + urllib.quote(key, safe) + "," +
                       urllib.quote(value[key], safe) for key in keys])
    else:
      return ",".join([urllib.quote(key, safe) + "," +
                       urllib.quote(value[key], safe) for key in keys])
  else:
    return urllib.quote(value, safe)
def _tostring_path(varname, value, explode, operator, safe=""):
  """Render a template variable for path-style ('/' or '.') expansion.

  Args:
    varname: string, the variable name from the template.
    value: string, list or dict, the value bound to the variable.
    explode: string, the explode modifier ('+' or '*') or None.
    operator: string, the template operator; also used as the joiner.
    safe: string, characters urllib.quote should leave unescaped.

  Returns:
    string, the expanded value for this variable.
  """
  joiner = operator
  # isinstance (rather than type() comparison) also accepts subclasses.
  if isinstance(value, list):
    if explode == "+":
      return joiner.join([varname + "." + urllib.quote(x, safe)
                          for x in value])
    elif explode == "*":
      return joiner.join([urllib.quote(x, safe) for x in value])
    else:
      return ",".join([urllib.quote(x, safe) for x in value])
  elif isinstance(value, dict):
    # Sort keys for deterministic output.
    keys = sorted(value.keys())
    if explode == "+":
      return joiner.join([varname + "." + urllib.quote(key, safe) + joiner +
                          urllib.quote(value[key], safe) for key in keys])
    elif explode == "*":
      return joiner.join([urllib.quote(key, safe) + joiner +
                          urllib.quote(value[key], safe) for key in keys])
    else:
      return ",".join([urllib.quote(key, safe) + "," +
                       urllib.quote(value[key], safe) for key in keys])
  else:
    if value:
      return urllib.quote(value, safe)
    else:
      return ""
def _tostring_query(varname, value, explode, operator, safe=""):
  """Render a template variable for query-style (';' or '?') expansion.

  Args:
    varname: string, the variable name from the template.
    value: string, list or dict, the value bound to the variable.
    explode: string, the explode modifier ('+' or '*') or None.
    operator: string, the template operator.
    safe: string, characters urllib.quote should leave unescaped.

  Returns:
    string, the expanded value for this variable.
  """
  joiner = operator
  varprefix = ""
  if operator == "?":
    # '?' expressions join with '&' and prefix the variable name.
    joiner = "&"
    varprefix = varname + "="
  # isinstance (rather than type() comparison) also accepts subclasses.
  if isinstance(value, list):
    if 0 == len(value):
      return ""
    if explode == "+":
      return joiner.join([varname + "=" + urllib.quote(x, safe)
                          for x in value])
    elif explode == "*":
      return joiner.join([urllib.quote(x, safe) for x in value])
    else:
      return varprefix + ",".join([urllib.quote(x, safe) for x in value])
  elif isinstance(value, dict):
    if 0 == len(value):
      return ""
    # Sort keys for deterministic output.
    keys = sorted(value.keys())
    if explode == "+":
      return joiner.join([varname + "." + urllib.quote(key, safe) + "=" +
                          urllib.quote(value[key], safe) for key in keys])
    elif explode == "*":
      return joiner.join([urllib.quote(key, safe) + "=" +
                          urllib.quote(value[key], safe) for key in keys])
    else:
      return varprefix + ",".join([urllib.quote(key, safe) + "," +
                                   urllib.quote(value[key], safe)
                                   for key in keys])
  else:
    if value:
      return varname + "=" + urllib.quote(value, safe)
    else:
      return varname
# Dispatch table: template operator -> function that renders its variables.
TOSTRING = {
    "" : _tostring,
    "+": _tostring,
    ";": _tostring_query,
    "?": _tostring_query,
    "/": _tostring_path,
    ".": _tostring_path,
    }
def expand(template, vars):
  """Expand a URI Template (early draft-04 subset) with the given variables.

  Args:
    template: string, the URI Template.
    vars: dict, variable names mapped to strings, lists or dicts.

  Returns:
    string, the template with every {...} expression expanded.
  """
  def _sub(match):
    # Called by re.sub once per {...} expression in the template.
    groupdict = match.groupdict()
    operator = groupdict.get('operator')
    if operator is None:
      operator = ''
    varlist = groupdict.get('varlist')

    safe = "@"
    # The '+' operator lets reserved URI characters through unescaped.
    if operator == '+':
      safe = RESERVED

    # Parse each varspec into (name, explode modifier, partial modifier)
    # and collect any '=default' values.
    varspecs = varlist.split(",")
    varnames = []
    defaults = {}
    for varspec in varspecs:
      m = VAR.search(varspec)
      groupdict = m.groupdict()
      varname = groupdict.get('varname')
      explode = groupdict.get('explode')
      partial = groupdict.get('partial')
      default = groupdict.get('default')
      if default:
        defaults[varname] = default
      varnames.append((varname, explode, partial))

    # Choose the prefix/joiner the operator dictates.
    retval = []
    joiner = operator
    prefix = operator
    if operator == "+":
      prefix = ""
      joiner = ","
    if operator == "?":
      joiner = "&"
    if operator == "":
      joiner = ","
    for varname, explode, partial in varnames:
      if varname in vars:
        value = vars[varname]
        #if not value and (type(value) == type({}) or type(value) == type([])) and varname in defaults:
        # Empty containers fall back to the default; empty strings do not.
        if not value and value != "" and varname in defaults:
          value = defaults[varname]
      elif varname in defaults:
        value = defaults[varname]
      else:
        # Undefined variable with no default contributes nothing.
        continue
      retval.append(TOSTRING[operator](varname, value, explode, operator, safe=safe))
    # An all-empty expansion collapses to the empty string (no prefix).
    if "".join(retval):
      return prefix + joiner.join(retval)
    else:
      return ""

  return TEMPLATE.sub(_sub, template)
| [
[
1,
0,
0.0204,
0.0068,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0272,
0.0068,
0,
0.66,
0.0833,
614,
0,
1,
0,
0,
614,
0,
0
],
[
14,
0,
0.0408,
0.0068,
0,
... | [
"import re",
"import urllib",
"RESERVED = \":/?#[]@!$&'()*+,;=\"",
"OPERATOR = \"+./;?|!@\"",
"EXPLODE = \"*+\"",
"MODIFIER = \":^\"",
"TEMPLATE = re.compile(r\"{(?P<operator>[\\+\\./;\\?|!@])?(?P<varlist>[^}]+)}\", re.UNICODE)",
"VAR = re.compile(r\"^(?P<varname>[^=\\+\\*:\\^]+)((?P<explode>[\\+\\*])... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Class-wide counter used to give each validator a unique insertion_index.
  validators_count = 0

  def __init__(self, checker, message):
    """Constructor to create all validators.

    Args:
      checker: function to verify the constraint.
        Input of this method varies, see SimpleValidator and
        DictionaryValidator for a detailed description.
      message: string, error message to be shown to the user
    """
    self.checker = checker
    self.message = message
    # Stamp this instance with its registration position so validators can
    # later be asserted in the order they were registered (CL/18694236).
    new_count = Validator.validators_count + 1
    Validator.validators_count = new_count
    self.insertion_index = new_count

  def Verify(self, flag_values):
    """Verify that constraint is satisfied.

    flags library calls this method to verify Validator's constraint.

    Args:
      flag_values: flags.FlagValues, containing all flags

    Raises:
      Error: if constraint is not satisfied.
    """
    param = self._GetInputToCheckerFunction(flag_values)
    if not self.checker(param):
      raise Error(self.message)

  def GetFlagsNames(self):
    """Return the names of the flags checked by this validator.

    Returns:
      [string], names of the flags
    """
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Given flag values, construct the input to be given to checker.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Returns:
      Return type depends on the specific validator.
    """
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Validates that a single flag passes its checker function. The checker function
  takes the flag value and returns True (if value looks fine) or, if flag value
  is not valid, either returns False or raises an Exception."""

  def __init__(self, flag_name, checker, message):
    """Constructor.

    Args:
      flag_name: string, name of the flag.
      checker: function to verify the validator.
        input - value of the corresponding flag (string, boolean, etc).
        output - Boolean. Must return True if validator constraint is satisfied.
          If constraint is not satisfied, it should either return False or
          raise Error.
      message: string, error message to be shown to the user if validator's
        condition is not satisfied
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def GetFlagsNames(self):
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    current = flag_values[self.flag_name].value
    return 'flag --%s=%s' % (self.flag_name, current)

  def _GetInputToCheckerFunction(self, flag_values):
    """Given flag values, construct the input to be given to checker.

    Args:
      flag_values: flags.FlagValues

    Returns:
      value of the corresponding flag.
    """
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Validates that flag values pass their common checker function. The checker
  function takes flag values and returns True (if values look fine) or,
  if values are not valid, either returns False or raises an Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Constructor.

    Args:
      flag_names: [string], containing names of the flags used by checker.
      checker: function to verify the validator.
        input - dictionary, with keys() being flag_names, and value for each
          key being the value of the corresponding flag (string, boolean, etc).
        output - Boolean. Must return True if validator constraint is satisfied.
          If constraint is not satisfied, it should either return False or
          raise Error.
      message: string, error message to be shown to the user if validator's
        condition is not satisfied
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Given flag values, construct the input to be given to checker.

    Args:
      flag_values: flags.FlagValues

    Returns:
      dictionary, with keys() being self.flag_names, and value for each key
      being the value of the corresponding flag (string, boolean, etc).
    """
    return dict([key, flag_values[key].value] for key in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    # Render e.g. "flags a=1, b=2" for use in error messages.
    prefix = 'flags '
    flags_with_values = []
    for key in self.flag_names:
      flags_with_values.append('%s=%s' % (key, flag_values[key].value))
    return prefix + ', '.join(flags_with_values)

  def GetFlagsNames(self):
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
"""SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
if getattr(socket, 'socket', None) is None:
raise ImportError('socket.socket missing, proxy support unusable')
import struct
import sys
# Proxy protocol identifiers accepted by setproxy()/setdefaultproxy().
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
# Module-wide default proxy tuple; None until setdefaultproxy() is called.
_defaultproxy = None
# Small hack for Python 2.x: provide a bytes(obj, enc) shim so calls like
# bytes(data, 'utf8') work unchanged on 2.x, where str is already bytes.
if sys.version_info[0] <= 2:
    def bytes(obj, enc=None):
        return obj
class ProxyError(Exception):
    """Base class for all proxy negotiation errors.

    value is conventionally a (code, message) tuple describing the failure.
    """
    def __init__(self, value):
        self.value = value
    def __str__(self):
        # Render as the repr of the wrapped value, e.g. "(1, 'invalid data')".
        return repr(self.value)
class GeneralProxyError(ProxyError):
    """Generic proxy failure; value holds a (code, message) tuple.

    Construction and str() behavior are inherited from ProxyError, which
    defines the identical __init__/__str__ this class previously duplicated.
    """
class Socks5AuthError(ProxyError):
    """SOCKS5 authentication failed; value holds a (code, message) tuple
    indexed into _socks5autherrors.

    Construction and str() behavior are inherited from ProxyError, which
    defines the identical __init__/__str__ this class previously duplicated.
    """
class Socks5Error(ProxyError):
    """SOCKS5 server returned an error reply; value holds a (code, message)
    tuple indexed into _socks5errors.

    Construction and str() behavior are inherited from ProxyError, which
    defines the identical __init__/__str__ this class previously duplicated.
    """
class Socks4Error(ProxyError):
    """SOCKS4 server rejected the request; value holds a (code, message)
    tuple indexed into _socks4errors.

    Construction and str() behavior are inherited from ProxyError, which
    defines the identical __init__/__str__ this class previously duplicated.
    """
class HTTPError(ProxyError):
    """HTTP CONNECT proxy returned a non-200 status; value holds a
    (statuscode, reason) tuple.

    Construction and str() behavior are inherited from ProxyError, which
    defines the identical __init__/__str__ this class previously duplicated.
    """
# Messages for GeneralProxyError, indexed by error code.
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")
# SOCKS5 reply-field messages, indexed by the server's reply code (0-8)
# with a catch-all "Unknown error" at index 9.
_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")
# SOCKS5 authentication-phase messages for Socks5AuthError.
_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")
# SOCKS4 status messages; server codes 90-93 map to indexes 0-3,
# anything else maps to index 4.
_socks4errors = ("request granted",
    "request rejected or failed",
    ("request rejected because SOCKS server cannot connect to "
     "identd on the client"),
    ("request rejected because the client program and identd"
     " report different user-ids"),
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True,
                    username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Sets a default proxy which all further socksocket objects will use,
    unless explicitly changed.
    """
    global _defaultproxy
    # Stored in the same 6-tuple layout that socksocket.__init__ copies
    # into self.__proxy: (proxytype, addr, port, rdns, username, password).
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object
    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """
    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM,
                 proto=0, _sock=None):
        socket.socket.__init__(self, family, type, proto, _sock)
        # __proxy mirrors setproxy()'s argument order:
        # (proxytype, addr, port, rdns, username, password)
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
    def __decode(self, bytes):
        # Best-effort conversion of a bytes-like object to text; a no-op
        # when the argument has no usable decode() (e.g. Python 2 str).
        if getattr(bytes, 'decode', False):
            try:
                bytes = bytes.decode()
            except Exception:
                pass
        return bytes
    def __encode(self, bytes):
        # Best-effort conversion of text to bytes; mirror image of __decode.
        if getattr(bytes, 'encode', False):
            try:
                bytes = bytes.encode()
            except Exception:
                pass
        return bytes
    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        Raises GeneralProxyError if the peer closes the connection first.
        """
        data = bytes("")
        while len(data) < count:
            d = self.recv(count - len(data))
            if not d:
                raise GeneralProxyError(
                    (0, "connection closed unexpectedly"))
            data = data + self.__decode(d)
        return data
    def sendall(self, bytes):
        # Encode text payloads before delegating to the real sendall.
        socket.socket.sendall(self, self.__encode(bytes))
    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True,
                 username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
            are supported: PROXY_TYPE_SOCKS4 (including socks4a),
            PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
            servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be preformed on the remote side
            (rather than the local side). The default is True.
            Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
            The default is no authentication.
        password - Password to authenticate with to the server.
            Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)
    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4] != None) and (self.__proxy[5] != None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall("\x05\x02\x00\x02")
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall("\x05\x01\x00")
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0] != "\x05":
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1] == "\x00":
            # No authentication is required
            pass
        elif chosenauth[1] == "\x02":
            # Okay, we need to perform a basic username/password
            # authentication.
            self.sendall("\x01" + chr(len(self.__proxy[4])) + self.__proxy[4] +
                         chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0] != "\x01":
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1] != "\x00":
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad: the server either refused every
            # method we offered (0xFF) or replied with garbage.
            self.close()
            if chosenauth[1] == "\xFF":
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = "\x05\x01\x00"
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + "\x01" + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3] == True:
                # Resolve remotely
                ipaddr = None
                req = req + "\x03" + chr(len(destaddr)) + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + "\x01" + ipaddr
        req = req + self.__decode(struct.pack(">H", destport))
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0] != "\x05":
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1] != "\x00":
            # Connection failed
            self.close()
            if ord(resp[1]) <= 8:
                raise Socks5Error((ord(resp[1]), _socks5errors[ord(resp[1])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3] == "\x01":
            # IPv4 reply: fixed four address bytes.
            boundaddr = self.__recvall(4)
        elif resp[3] == "\x03":
            # Domain-name reply: a single length byte precedes the name.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4]))
        else:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # bytes() here is either the Py3 builtin or this module's Py2 shim.
        boundport = struct.unpack(">H", bytes(self.__recvall(2), 'utf8'))[0]
        self.__proxysockname = boundaddr, boundport
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname
    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return socket.socket.getpeername(self)
    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername
    def __negotiatesocks4(self, destaddr, destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3] == True:
                # SOCKS4a remote resolve: send the invalid address 0.0.0.1
                # and append the hostname after the userid below.
                ipaddr = "\x00\x00\x00\x01"
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = "\x04\x01" + self.__decode(struct.pack(">H", destport)) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + "\x00"
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv==True:
            req = req + destaddr + "\x00"
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0] != "\x00":
            # Bad data
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if resp[1] != "\x5A":
            # Server returned an error
            self.close()
            if ord(resp[1]) in (91,92,93):
                self.close()
                raise Socks4Error((ord(resp[1]), _socks4errors[ord(resp[1])-90]))
            else:
                raise Socks4Error((94,_socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]),struct.unpack(">H",bytes(resp[2:4],'utf8'))[0])
        # NOTE(review): rmtrslv is always True or False here, never None, so
        # this condition is always taken and the else branch is dead code.
        # It looks like the intent was to use destaddr when the name was
        # resolved remotely -- confirm against upstream SocksiPy before changing.
        if rmtrslv != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr),destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if self.__proxy[3] == False:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall(("CONNECT %s:%s HTTP/1.1\r\n"
                      "Host: %s\r\n\r\n") % (addr, destport, destaddr))
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n") == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ", 2)
        if statusline[0] not in ("HTTP/1.0", "HTTP/1.1"):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        # HTTP CONNECT does not report a bound address, so fake one.
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
    def connect(self, destpair):
        """connect(self, destpair)
        Connects to the specified destination through a proxy.
        destpair - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first
        # TODO(durin42): seriously? type checking? do we care?
        if ((not isinstance(destpair, (list, tuple))) or len(destpair) < 2
            or not isinstance(destpair[0], str) or not isinstance(destpair[1], int)):
            raise GeneralProxyError((5, _generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            socket.socket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            socket.socket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            socket.socket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == None:
            # No proxy configured: plain direct connection.
            socket.socket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
| [
[
8,
0,
0.0364,
0.0705,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
8,
0,
0.0841,
0.0205,
0,
0.66,
0.0455,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0977,
0.0023,
0,
0.66,
... | [
"\"\"\"SocksiPy - Python SOCKS module.\nVersion 1.00\n\nCopyright 2006 Dan-Haim. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n1. Redistributions of source code must retain the above copyright ... |
"""
iri2uri
Converts an IRI to a URI.
"""
__author__ = "Joe Gregorio (joe@bitworking.org)"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to enocde and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Inclusive code-point ranges from RFC 3987 (the `ucschar` and `iprivate`
# productions) whose characters must be percent-encoded when mapping an
# IRI to a URI. Entries are sorted ascending so encode() can stop at the
# first range that starts above the probed code point.
# (Values are unchanged; the inconsistent spacing inside the tuples was
# normalized.)
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD),
]
def encode(c):
    """Percent-encode the single character c if its code point falls in one
    of the IRI escape_range intervals; otherwise return it unchanged."""
    point = ord(c)
    for low, high in escape_range:
        if point < low:
            # Ranges are sorted ascending, so no later range can match.
            break
        if low <= point <= high:
            # Escape each octet of the character's UTF-8 encoding.
            return "".join(["%%%2X" % ord(octet) for octet in c.encode('utf-8')])
    return c
def iri2uri(uri):
    """Convert an IRI to a URI.

    The IRI must be supplied as a unicode string -- do not utf-8 encode it
    before calling. Non-unicode values are returned unchanged.
    """
    if isinstance(uri, unicode):
        scheme, authority, path, query, fragment = urlparse.urlsplit(uri)
        # The host portion uses IDNA encoding; every other component gets
        # per-octet percent-encoding of 'ucschar'/'iprivate' characters
        # (that is: utf-8 encode, then %-escape each octet) via encode().
        authority = authority.encode('idna')
        uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))
        uri = "".join(encode(char) for char in uri)
    return uri
if __name__ == "__main__":
    # Self-test: run the embedded unit tests when executed directly.
    import unittest
    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:John.Doe@example.com",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))
        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            # iri2uri must be idempotent: encoding an already-encoded URI
            # leaves it unchanged.
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            # A pre-utf-8-encoded byte string must NOT be escaped again.
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
    unittest.main()
| [
[
8,
0,
0.0318,
0.0545,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0636,
0.0091,
0,
0.66,
0.0909,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0727,
0.0091,
0,
0.66... | [
"\"\"\"\niri2uri\n\nConverts an IRI to a URI.\n\n\"\"\"",
"__author__ = \"Joe Gregorio (joe@bitworking.org)\"",
"__copyright__ = \"Copyright 2006, Joe Gregorio\"",
"__contributors__ = []",
"__version__ = \"1.0.0\"",
"__license__ = \"MIT\"",
"__history__ = \"\"\"\n\"\"\"",
"import urlparse",
"escape_... |
#!/usr/bin/python2.4
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Discovery document tests
Functional tests that verify we can retrieve data from existing services.
"""
__author__ = 'ade@google.com (Ade Oshineye)'
import httplib2
import pprint
from apiclient.discovery import build
import httplib2
import logging
import pickle
import os
import time
import unittest
# TODO(ade) Remove this mock once the bug in the discovery document is fixed
# Directory containing the canned HTTP responses served by HttpMock.
# (Was os.path reached via `logging.os.path`, which only worked through an
# implementation detail of the logging module; use the imported os directly.)
DATA_DIR = os.path.join(os.path.dirname(__file__), '../tests/data')
class HttpMock(object):
  """Stand-in for httplib2.Http that replays one canned response.

  The response body is read from a file under DATA_DIR; the headers are
  supplied by the caller and wrapped in an httplib2.Response.
  """

  def __init__(self, filename, headers):
    """Args:
      filename: string, file under DATA_DIR whose contents become the body.
      headers: dict, headers for the canned httplib2.Response.
    """
    # open() instead of the Python 2-only file() builtin; try/finally
    # guarantees the handle is closed even if read() raises.
    f = open(os.path.join(DATA_DIR, filename), 'r')
    try:
      self.data = f.read()
    finally:
      f.close()
    self.headers = headers

  def request(self, uri, method="GET", body=None, headers=None, redirections=1, connection_type=None):
    # The request itself is ignored; the canned response is always returned.
    return httplib2.Response(self.headers), self.data
class BuzzFunctionalTest(unittest.TestCase):
  """Unauthenticated functional tests against the live Buzz API.

  These tests require network access and query real public accounts, so
  failures can reflect changes in service data rather than client bugs.
  """
  def test_can_get_specific_activity(self):
    buzz = build('buzz', 'v1')
    activity = buzz.activities().get(userId='105037104815911535953',
                                     postId='B:z12sspviqyakfvye123wehng0muwz5jzq04').execute()
    self.assertTrue(activity is not None)
  def test_can_get_specific_activity_with_tag_id(self):
    # Same lookup as above, but addressing the post by its tag: form.
    buzz = build('buzz', 'v1')
    activity = buzz.activities().get(userId='105037104815911535953',
                                     postId='tag:google.com,2010:buzz:z13ptnw5usmnv15ey22fzlswnuqoebasu').execute()
    self.assertTrue(activity is not None)
  def test_can_get_buzz_activities_with_many_params(self):
    buzz = build('buzz', 'v1')
    max_results = 2
    activities_command = buzz.activities()
    activities = activities_command.list(userId='googlebuzz', scope='@self',
                                         max_comments=max_results*2 ,max_liked=max_results*3,
                                         max_results=max_results).execute()
    activity_count = len(activities['items'])
    self.assertEquals(max_results, activity_count)
    # The next page fetched via list_next should honor the same max_results.
    activities = activities_command.list_next(activities).execute()
    activity_count = len(activities['items'])
    self.assertEquals(max_results, activity_count)
  def test_can_get_multiple_pages_of_buzz_activities(self):
    buzz = build('buzz', 'v1')
    max_results = 2
    activities_command = buzz.activities()
    activities = activities_command.list(userId='adewale', scope='@self',
                                         max_results=max_results).execute()
    # Walk ten pages; every page should contain exactly max_results items.
    for count in range(10):
      activities = activities_command.list_next(activities).execute()
      activity_count = len(activities['items'])
      self.assertEquals(max_results, activity_count, 'Failed after %s pages' % str(count))
  def IGNORE__test_can_get_multiple_pages_of_buzz_likers(self):
    # Ignore this test until the Buzz API fixes the bug with next links
    # http://code.google.com/p/google-buzz-api/issues/detail?id=114
    self.http = HttpMock('buzz.json', {'status': '200'})
    buzz = build('buzz', 'v1', self.http)
    max_results = 1
    people_cmd = buzz.people()
    # The post https://www.googleapis.com/buzz/v1/activities/111062888259659218284/@self/B:z13nh535yk2syfob004cdjyb3mjeulcwv3c?alt=json#
    #Perform this call https://www.googleapis.com/buzz/v1/activities/111062888259659218284/@self/B:z13nh535yk2syfob004cdjyb3mjeulcwv3c/@liked?alt=json&max-results=1
    people = people_cmd.liked(groupId='@liked', userId='googlebuzz', scope='@self',
                              postId='B:z13nh535yk2syfob004cdjyb3mjeulcwv3c', max_results=max_results).execute()
    for count in range(10):
      people = people_cmd.liked_next(people).execute()
      people_count = len(people['items'])
      self.assertEquals(max_results, people_count, 'Failed after %s pages' % str(count))
  def test_can_get_user_profile(self):
    buzz = build('buzz', 'v1')
    person = buzz.people().get(userId='googlebuzz').execute()
    self.assertTrue(person is not None)
    self.assertEquals('buzz#person', person['kind'])
    self.assertEquals('Google Buzz Team', person['displayName'])
    self.assertEquals('111062888259659218284', person['id'])
    self.assertEquals('https://profiles.google.com/googlebuzz', person['profileUrl'])
  def test_can_get_user_profile_using_numeric_identifier(self):
    buzz = build('buzz', 'v1')
    person = buzz.people().get(userId='108242092577082601423').execute()
    self.assertTrue(person is not None)
    self.assertEquals('buzz#person', person['kind'])
    self.assertEquals('Test Account', person['displayName'])
    self.assertEquals('108242092577082601423', person['id'])
    self.assertEquals('https://profiles.google.com/108242092577082601423', person['profileUrl'])
  def test_can_get_followees_of_user(self):
    buzz = build('buzz', 'v1')
    expected_followees = 30
    following = buzz.people().list(userId='googlebuzz', groupId='@following', max_results=expected_followees).execute()
    self.assertEquals(expected_followees, following['totalResults'])
    self.assertEquals(expected_followees, len(following['entry']))
  def test_can_efficiently_get_follower_count_of_user(self):
    buzz = build('buzz', 'v1')
    # Restricting max_results to 1 means only a tiny amount of data comes back but the totalResults still has the total.
    followers = buzz.people().list(userId='googlebuzz', groupId='@followers',
                                   max_results='1').execute()
    # @googlebuzz has a large but fluctuating number of followers
    # It is sufficient if the result is bigger than 10, 000
    follower_count = followers['totalResults']
    self.assertTrue(follower_count > 10000, follower_count)
  def test_follower_count_is_missing_for_user_with_hidden_follower_count(self):
    buzz = build('buzz', 'v1')
    followers = buzz.people().list(userId='adewale', groupId='@followers').execute()
    self.assertFalse('totalResults' in followers)
class BuzzAuthenticatedFunctionalTest(unittest.TestCase):
  """Functional tests that write to the live Buzz API with stored OAuth
  credentials.

  Requires network access and a pickled credentials object in
  ./data/buzz_credentials.dat.
  """
  def __init__(self, method_name):
    unittest.TestCase.__init__(self, method_name)
    # NOTE(review): reaches os.path through the logging module and uses the
    # Python 2-only file() builtin; works, but os.path.dirname/open() would
    # be the conventional spelling.
    credentials_dir = os.path.join(logging.os.path.dirname(__file__), './data')
    f = file(os.path.join(credentials_dir, 'buzz_credentials.dat'), 'r')
    credentials = pickle.loads(f.read())
    f.close()
    # Every request made through self.http carries the stored authorization.
    self.http = credentials.authorize(httplib2.Http())
  def test_can_create_activity(self):
    buzz = build('buzz', 'v1', http=self.http)
    activity = buzz.activities().insert(userId='@me', body={
        'data': {
            'title': 'Testing insert',
            'object': {
                'content': u'Just a short note to show that insert is working. ?',
                'type': 'note'}
            }
        }
    ).execute()
    self.assertTrue(activity is not None)
  def test_can_create_private_activity(self):
    buzz = build('buzz', 'v1', http=self.http)
    # The visibility entry restricts the post to a single group.
    activity = buzz.activities().insert(userId='@me', body={
        'data': {
            'title': 'Testing insert',
            'object': {
                'content': 'This is a private post.'
            },
            'visibility': {
                'entries': [
                    { 'id': 'tag:google.com,2010:buzz-group:108242092577082601423:13' }
                ]
            }
        }
        }
    ).execute()
    self.assertTrue(activity is not None)
  def test_can_create_and_delete_new_group(self):
    buzz = build('buzz', 'v1', http=self.http)
    # Timestamp in the name keeps repeated runs from colliding.
    group_name = 'New Group Created At' + str(time.time())
    group = buzz.groups().insert(userId='@me', body = {
        'data': {
            'title': group_name
        }
    }).execute()
    self.assertTrue(group is not None)
    result = buzz.groups().delete(userId='@me', groupId=group['id']).execute()
    self.assertEquals({}, result)
  def test_can_identify_number_of_groups_belonging_to_user(self):
    buzz = build('buzz', 'v1', http=self.http)
    groups = buzz.groups().list(userId='108242092577082601423').execute()
    # This should work as long as no-one deletes the 4 default groups for this test account
    expected_default_number_of_groups = 4
    self.assertTrue(len(groups['items']) > expected_default_number_of_groups)
  def IGNORE__test_can_like_activity(self):
    buzz = build('buzz', 'v1', http=self.http)
    activity = buzz.activities().insert(userId='@me', body={
        'data': {
            'title': 'Testing insert',
            'object': {
                'content': u'Just a short note to show that insert is working. ?',
                'type': 'note'}
            }
        }
    ).execute()
    pprint.pprint(activity)
    id = activity['id']
    likers = buzz.people().liked(userId='105037104815911535953', postId=id, groupId='@liked', scope='@self').execute()
    # Todo(ade) Insert the new liker once the Buzz back-end bug is fixed
  def test_can_comment_on_activity(self):
    buzz = build('buzz', 'v1', http=self.http)
    activity = buzz.activities().insert(userId='@me', body={
        'data': {
            'title': 'A new activity',
            'object': {
                'content': u'The body of the new activity',
                'type': 'note'}
            }
        }
    ).execute()
    id = activity['id']
    comment = buzz.comments().insert(userId='@me', postId=id, body={
        'data': {
            'content': 'A comment on the new activity'
        }
    }).execute()
  def test_can_list_groups_belonging_to_user(self):
    buzz = build('buzz', 'v1', http=self.http)
    groups = buzz.groups().list(userId='108242092577082601423').execute()
    # The four default groups for this test account should all be fetchable.
    group = buzz.groups().get(userId='108242092577082601423', groupId='G:108242092577082601423:15').execute()
    self.assertEquals('G:108242092577082601423:15', group['id'], group)
    group = buzz.groups().get(userId='108242092577082601423', groupId='G:108242092577082601423:14').execute()
    self.assertEquals('G:108242092577082601423:14', group['id'], group)
    group = buzz.groups().get(userId='108242092577082601423', groupId='G:108242092577082601423:13').execute()
    self.assertEquals('G:108242092577082601423:13', group['id'], group)
    group = buzz.groups().get(userId='108242092577082601423', groupId='G:108242092577082601423:6').execute()
    self.assertEquals('G:108242092577082601423:6', group['id'], group)
  def test_can_delete_activity(self):
    buzz = build('buzz', 'v1', http=self.http)
    activity = buzz.activities().insert(userId='@me', body={
        'data': {
            'title': 'Activity to be deleted',
            'object': {
                'content': u'Created this activity so that it can be deleted.',
                'type': 'note'}
            }
        }
    ).execute()
    id = activity['id']
    buzz.activities().delete(scope='@self', userId='@me', postId=id).execute()
    # Give the backend a moment to apply the delete before verifying.
    time.sleep(2)
    activity_url = activity['links']['self'][0]['href']
    resp, content = self.http.request(activity_url, 'GET')
    self.assertEquals(404, resp.status)
if __name__ == '__main__':
  # Run all functional tests when executed directly.
  unittest.main()
| [
[
8,
0,
0.0233,
0.0143,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0358,
0.0036,
0,
0.66,
0.0667,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.043,
0.0036,
0,
0.66,
... | [
"\"\"\"Discovery document tests\n\nFunctional tests that verify we can retrieve data from existing services.\n\"\"\"",
"__author__ = 'ade@google.com (Ade Oshineye)'",
"import httplib2",
"import pprint",
"from apiclient.discovery import build",
"import httplib2",
"import logging",
"import pickle",
"i... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for running against a
local server.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# Enable this sample to be run from the top-level directory
import os
import sys
sys.path.insert(0, os.getcwd())
from apiclient.discovery import build
import httplib2
# httplib2.debuglevel = 4
import pickle
import pprint
# Discovery-document URL template for a locally-running API server;
# build() substitutes {api} and {apiVersion}.
DISCOVERY_URI = ('http://localhost:3990/discovery/v0.2beta1/describe/'
    '{api}/{apiVersion}')
def main():
  """Build a Buzz client against the local discovery server and poke it."""
  transport = httplib2.Http()
  service = build("buzz", "v1", http=transport,
                  discoveryServiceUrl=DISCOVERY_URI)
  # Show the generated method documentation, then the URI that a
  # list() call would request.
  help(service.activities().list)
  print(service.activities().list(userId='@self', scope='@me', c='foo').uri)


if __name__ == '__main__':
  main()
| [
[
8,
0,
0.2027,
0.1081,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2973,
0.027,
0,
0.66,
0.0909,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.3784,
0.027,
0,
0.66,
... | [
"\"\"\"Simple command-line example for running against a\n local server.\n\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import os",
"import sys",
"sys.path.insert(0, os.getcwd())",
"from apiclient.discovery import build",
"import httplib2",
"import pickle",
"import pprint",
"... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for running against a
local server.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
# Enable this sample to be run from the top-level directory
import os
import sys
sys.path.insert(0, os.getcwd())
from apiclient.discovery import build
import httplib2
# httplib2.debuglevel = 4
import pickle
import pprint
# Discovery-document URL template for a locally-running API server;
# build() substitutes {api} and {apiVersion}.
DISCOVERY_URI = ('http://localhost:3990/discovery/v0.2beta1/describe/'
    '{api}/{apiVersion}')
def main():
  """Exercise a Buzz client built from the local discovery endpoint."""
  client_http = httplib2.Http()
  service = build("buzz", "v1", http=client_http,
                  discoveryServiceUrl=DISCOVERY_URI)
  # Dump the generated docs for list(), then show the request URI it builds.
  help(service.activities().list)
  print(service.activities().list(userId='@self', scope='@me', c='foo').uri)


if __name__ == '__main__':
  main()
[
8,
0,
0.2027,
0.1081,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2973,
0.027,
0,
0.66,
0.0909,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.3784,
0.027,
0,
0.66,
... | [
"\"\"\"Simple command-line example for running against a\n local server.\n\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import os",
"import sys",
"sys.path.insert(0, os.getcwd())",
"from apiclient.discovery import build",
"import httplib2",
"import pickle",
"import pprint",
"... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
class Credentials(db.Model):
  """Datastore entity storing one user's OAuth2 credentials, keyed by user id."""
  credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
  """Serves '/': starts the Dailymotion OAuth2 dance when the signed-in
  user has no valid credentials, otherwise calls the API and renders the
  results."""
  @login_required
  def get(self):
    user = users.get_current_user()
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
      # No usable credentials: begin the authorization flow.
      # NOTE(review): the client id/secret are hard-coded sample values;
      # a real deployment should keep the secret out of source control.
      flow = OAuth2WebServerFlow(
          client_id='2ad565600216d25d9cde',
          client_secret='03b56df2949a520be6049ff98b89813f17b467dc',
          scope='read',
          user_agent='oauth2client-sample/1.0',
          auth_uri='https://api.dailymotion.com/oauth/authorize',
          token_uri='https://api.dailymotion.com/oauth/token'
          )
      callback = self.request.relative_url('/auth_return')
      authorize_url = flow.step1_get_authorize_url(callback)
      # Stash the flow so OAuthHandler can complete the token exchange.
      memcache.set(user.user_id(), pickle.dumps(flow))
      self.redirect(authorize_url)
    else:
      http = httplib2.Http()
      # First request passes the access token explicitly in the query
      # string; the second lets the authorized http object attach it.
      resp, content1 = http.request('https://api.dailymotion.com/me?access_token=%s' %
                                    credentials.access_token)
      http = credentials.authorize(http)
      resp, content2 = http.request('https://api.dailymotion.com/me')
      path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      logout = users.create_logout_url('/')
      self.response.out.write(
          template.render(
              path, {
                  'content1': content1,
                  'content2': content2,
                  'logout': logout
                  }))
class OAuthHandler(webapp.RequestHandler):
  """Serves '/auth_return': completes the OAuth2 flow started by MainHandler."""
  @login_required
  def get(self):
    user = users.get_current_user()
    # MainHandler pickled the flow into memcache, but memcache entries can
    # be evicted; memcache.get then returns None. The original code called
    # pickle.loads unconditionally, which raised TypeError before its
    # `if flow:` guard could run. Check before unpickling instead; when the
    # flow is missing we fall through and do nothing, matching the intent
    # of the original `else: pass`.
    pickled_flow = memcache.get(user.user_id())
    if pickled_flow:
      flow = pickle.loads(pickled_flow)
      # Exchange the authorization code in the request for credentials
      # and persist them for future visits.
      credentials = flow.step2_exchange(self.request.params)
      StorageByKeyName(
          Credentials, user.user_id(), 'credentials').put(credentials)
      self.redirect("/")
def main():
  """Wire up the URL routes and hand the WSGI application to the runtime."""
  routes = [
      ('/', MainHandler),
      ('/auth_return', OAuthHandler),
  ]
  application = webapp.WSGIApplication(routes, debug=True)
  util.run_wsgi_app(application)


if __name__ == '__main__':
  main()
| [
[
14,
0,
0.1622,
0.009,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1892,
0.009,
0,
0.66,
0.05,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.1982,
0.009,
0,
0.66,
... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from oauth2client.appengine import CredentialsProperty",
"from oauth2client.appengine import StorageByKeyName",
"from oauth2client.clien... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator's constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Monotonically increasing counter used to hand out insertion indices.
  validators_count = 0

  def __init__(self, checker, message):
    """Create a validator.

    Args:
      checker: callable that verifies the constraint; what it receives
        depends on the concrete subclass (see SimpleValidator and
        DictionaryValidator).
      message: string, error message shown to the user on failure.
    """
    self.checker = checker
    self.message = message
    # Record registration order so validators are asserted in the order
    # they were registered (CL/18694236).
    Validator.validators_count += 1
    self.insertion_index = Validator.validators_count

  def Verify(self, flag_values):
    """Raise Error unless this validator's constraint holds.

    The flags library calls this method to enforce the constraint.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Raises:
      Error: if the constraint is not satisfied.
    """
    checker_input = self._GetInputToCheckerFunction(flag_values)
    if self.checker(checker_input):
      return
    raise Error(self.message)

  def GetFlagsNames(self):
    """Return the list of flag names this validator checks."""
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    """Return a printable description of the checked flags and values."""
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Build the checker's input from flag_values; subclass-specific."""
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Checks a single flag against a user-supplied checker function.  The
  checker receives the flag's value and returns True when the value is
  acceptable; otherwise it either returns False or raises an Exception.
  """

  def __init__(self, flag_name, checker, message):
    """Create a single-flag validator.

    Args:
      flag_name: string, name of the flag to check.
      checker: callable taking the flag's value (string, boolean, etc.)
        and returning True when the constraint is satisfied; otherwise
        it returns False or raises Error.
      message: string, error message shown to the user if the
        validator's condition is not satisfied.
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def GetFlagsNames(self):
    """Return the single checked flag name, wrapped in a list."""
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    """Return 'flag --name=value' for the checked flag."""
    current_value = flag_values[self.flag_name].value
    return 'flag --%s=%s' % (self.flag_name, current_value)

  def _GetInputToCheckerFunction(self, flag_values):
    """Return the current value of the checked flag.

    Args:
      flag_values: flags.FlagValues

    Returns:
      Value of the corresponding flag.
    """
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Checks several flags at once with a common checker function.  The
  checker receives a dictionary of flag values and returns True when
  they are acceptable; otherwise it either returns False or raises an
  Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Create a multi-flag validator.

    Args:
      flag_names: [string], names of the flags used by the checker.
      checker: callable taking a dictionary that maps each name in
        flag_names to that flag's value (string, boolean, etc.); must
        return True when the constraint is satisfied, otherwise return
        False or raise Error.
      message: string, error message shown to the user if the
        validator's condition is not satisfied.
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Return a dict mapping each name in self.flag_names to its value.

    Args:
      flag_values: flags.FlagValues
    """
    return dict((name, flag_values[name].value) for name in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    """Return 'flags name=value, ...' for all checked flags."""
    rendered = ['%s=%s' % (name, flag_values[name].value)
                for name in self.flag_names]
    return 'flags ' + ', '.join(rendered)

  def GetFlagsNames(self):
    """Return the list of checked flag names."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
class Credentials(db.Model):
  # Datastore model holding one user's OAuth2 credentials; rows are
  # accessed via StorageByKeyName keyed by the App Engine user id.
  credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
  # Entry point: shows the user's Dailymotion profile, first running the
  # OAuth2 authorization flow when no valid credentials are stored.
  @login_required
  def get(self):
    user = users.get_current_user()
    # Look up previously stored credentials for this App Engine user.
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
      # No usable credentials: build the OAuth2 flow for Dailymotion and
      # redirect the user to the provider's authorization page.
      flow = OAuth2WebServerFlow(
          client_id='2ad565600216d25d9cde',
          client_secret='03b56df2949a520be6049ff98b89813f17b467dc',
          scope='read',
          user_agent='oauth2client-sample/1.0',
          auth_uri='https://api.dailymotion.com/oauth/authorize',
          token_uri='https://api.dailymotion.com/oauth/token'
          )
      callback = self.request.relative_url('/auth_return')
      authorize_url = flow.step1_get_authorize_url(callback)
      # Park the flow in memcache so OAuthHandler can finish the exchange.
      memcache.set(user.user_id(), pickle.dumps(flow))
      self.redirect(authorize_url)
    else:
      http = httplib2.Http()
      # First request passes the access token explicitly as a query
      # parameter ...
      resp, content1 = http.request('https://api.dailymotion.com/me?access_token=%s' %
                                    credentials.access_token)
      http = credentials.authorize(http)
      # ... second request relies on the authorized http object instead.
      resp, content2 = http.request('https://api.dailymotion.com/me')
      path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      logout = users.create_logout_url('/')
      self.response.out.write(
          template.render(
              path, {
                  'content1': content1,
                  'content2': content2,
                  'logout': logout
                  }))
class OAuthHandler(webapp.RequestHandler):
  # Handles the OAuth2 redirect back from the provider: exchanges the
  # authorization response for credentials, stores them, and returns the
  # user to the main page.
  @login_required
  def get(self):
    user = users.get_current_user()
    # memcache entries can be evicted at any time; check for None BEFORE
    # unpickling.  The original called pickle.loads(memcache.get(...))
    # directly, which raises TypeError on a missing entry.
    pickled_flow = memcache.get(user.user_id())
    if pickled_flow:
      flow = pickle.loads(pickled_flow)
      credentials = flow.step2_exchange(self.request.params)
      StorageByKeyName(
          Credentials, user.user_id(), 'credentials').put(credentials)
      self.redirect("/")
    else:
      # No pending flow for this user (cache evicted or direct visit);
      # nothing to exchange.
      pass
def main():
  """Configure URL routes and start the WSGI application."""
  routes = [
      ('/', MainHandler),
      ('/auth_return', OAuthHandler),
  ]
  application = webapp.WSGIApplication(routes, debug=True)
  util.run_wsgi_app(application)


if __name__ == '__main__':
  main()
| [
[
14,
0,
0.1622,
0.009,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1892,
0.009,
0,
0.66,
0.05,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.1982,
0.009,
0,
0.66,
... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from oauth2client.appengine import CredentialsProperty",
"from oauth2client.appengine import StorageByKeyName",
"from oauth2client.clien... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator's constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Monotonically increasing counter used to hand out insertion indices.
  validators_count = 0

  def __init__(self, checker, message):
    """Create a validator.

    Args:
      checker: callable that verifies the constraint; what it receives
        depends on the concrete subclass (see SimpleValidator and
        DictionaryValidator).
      message: string, error message shown to the user on failure.
    """
    self.checker = checker
    self.message = message
    # Record registration order so validators are asserted in the order
    # they were registered (CL/18694236).
    Validator.validators_count += 1
    self.insertion_index = Validator.validators_count

  def Verify(self, flag_values):
    """Raise Error unless this validator's constraint holds.

    The flags library calls this method to enforce the constraint.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Raises:
      Error: if the constraint is not satisfied.
    """
    checker_input = self._GetInputToCheckerFunction(flag_values)
    if self.checker(checker_input):
      return
    raise Error(self.message)

  def GetFlagsNames(self):
    """Return the list of flag names this validator checks."""
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    """Return a printable description of the checked flags and values."""
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Build the checker's input from flag_values; subclass-specific."""
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Checks a single flag against a user-supplied checker function.  The
  checker receives the flag's value and returns True when the value is
  acceptable; otherwise it either returns False or raises an Exception.
  """

  def __init__(self, flag_name, checker, message):
    """Create a single-flag validator.

    Args:
      flag_name: string, name of the flag to check.
      checker: callable taking the flag's value (string, boolean, etc.)
        and returning True when the constraint is satisfied; otherwise
        it returns False or raises Error.
      message: string, error message shown to the user if the
        validator's condition is not satisfied.
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def GetFlagsNames(self):
    """Return the single checked flag name, wrapped in a list."""
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    """Return 'flag --name=value' for the checked flag."""
    current_value = flag_values[self.flag_name].value
    return 'flag --%s=%s' % (self.flag_name, current_value)

  def _GetInputToCheckerFunction(self, flag_values):
    """Return the current value of the checked flag.

    Args:
      flag_values: flags.FlagValues

    Returns:
      Value of the corresponding flag.
    """
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Checks several flags at once with a common checker function.  The
  checker receives a dictionary of flag values and returns True when
  they are acceptable; otherwise it either returns False or raises an
  Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Create a multi-flag validator.

    Args:
      flag_names: [string], names of the flags used by the checker.
      checker: callable taking a dictionary that maps each name in
        flag_names to that flag's value (string, boolean, etc.); must
        return True when the constraint is satisfied, otherwise return
        False or raise Error.
      message: string, error message shown to the user if the
        validator's condition is not satisfied.
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Return a dict mapping each name in self.flag_names to its value.

    Args:
      flag_values: flags.FlagValues
    """
    return dict((name, flag_values[name].value) for name in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    """Return 'flags name=value, ...' for all checked flags."""
    rendered = ['%s=%s' % (name, flag_values[name].value)
                for name in self.flag_names]
    return 'flags ' + ', '.join(rendered)

  def GetFlagsNames(self):
    """Return the list of checked flag names."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Latitude.
Command-line application that sets the users
current location.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from apiclient.oauth import CredentialsInvalidError
# Uncomment to get detailed logging
#httplib2.debuglevel = 4
def main():
storage = Storage('latitude.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
flow = OAuth2WebServerFlow(
client_id='433807057907.apps.googleusercontent.com',
client_secret='jigtZpMApkRxncxikFpR+SFg',
scope='https://www.googleapis.com/auth/latitude',
user_agent='latitude-cmdline-sample/1.0')
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("latitude", "v1", http=http)
body = {
"data": {
"kind": "latitude#location",
"latitude": 37.420352,
"longitude": -122.083389,
"accuracy": 130,
"altitude": 35
}
}
try:
print service.currentLocation().insert(body=body).execute()
except CredentialsInvalidError:
print 'Your credentials are no longer valid.'
print 'Please re-run this application to re-authorize.'
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1356,
0.0847,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2034,
0.0169,
0,
0.66,
0.1111,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2373,
0.0169,
0,
0.66,... | [
"\"\"\"Simple command-line example for Latitude.\n\nCommand-line application that sets the users\ncurrent location.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"from apiclient.discovery import build",
"from oauth2client.file import Storage",
"from oauth2client.clien... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Latitude.
Command-line application that sets the users
current location.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from apiclient.oauth import CredentialsInvalidError
# Uncomment to get detailed logging
#httplib2.debuglevel = 4
def main():
storage = Storage('latitude.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
flow = OAuth2WebServerFlow(
client_id='433807057907.apps.googleusercontent.com',
client_secret='jigtZpMApkRxncxikFpR+SFg',
scope='https://www.googleapis.com/auth/latitude',
user_agent='latitude-cmdline-sample/1.0')
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("latitude", "v1", http=http)
body = {
"data": {
"kind": "latitude#location",
"latitude": 37.420352,
"longitude": -122.083389,
"accuracy": 130,
"altitude": 35
}
}
try:
print service.currentLocation().insert(body=body).execute()
except CredentialsInvalidError:
print 'Your credentials are no longer valid.'
print 'Please re-run this application to re-authorize.'
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1356,
0.0847,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2034,
0.0169,
0,
0.66,
0.1111,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2373,
0.0169,
0,
0.66,... | [
"\"\"\"Simple command-line example for Latitude.\n\nCommand-line application that sets the users\ncurrent location.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"from apiclient.discovery import build",
"from oauth2client.file import Storage",
"from oauth2client.clien... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
class Credentials(db.Model):
  # Datastore model holding one user's OAuth2 credentials; rows are
  # accessed via StorageByKeyName keyed by the App Engine user id.
  credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
  # Entry point: lists Buzz activities for the signed-in user, first
  # running the OAuth2 authorization flow when no valid credentials are
  # stored.
  @login_required
  def get(self):
    user = users.get_current_user()
    # Look up previously stored credentials for this App Engine user.
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
      # No usable credentials: build the OAuth2 flow and redirect the
      # user to the authorization page.
      flow = OAuth2WebServerFlow(
          # Visit https://code.google.com/apis/console to
          # generate your client_id, client_secret and to
          # register your redirect_uri.
          client_id='<YOUR CLIENT ID HERE>',
          client_secret='<YOUR CLIENT SECRET HERE>',
          scope='https://www.googleapis.com/auth/buzz',
          user_agent='buzz-cmdline-sample/1.0',
          domain='anonymous',
          xoauth_displayname='Google App Engine Example App')
      callback = self.request.relative_url('/auth_return')
      authorize_url = flow.step1_get_authorize_url(callback)
      # Park the flow in memcache so OAuthHandler can finish the exchange.
      memcache.set(user.user_id(), pickle.dumps(flow))
      self.redirect(authorize_url)
    else:
      http = httplib2.Http()
      http = credentials.authorize(http)
      service = build("buzz", "v1", http=http)
      activities = service.activities()
      # NOTE(review): '@consumption' presumably selects activities the
      # user consumes — verify against the Buzz API reference.
      activitylist = activities.list(scope='@consumption',
                                     userId='@me').execute()
      path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      logout = users.create_logout_url('/')
      self.response.out.write(
          template.render(
              path, {'activitylist': activitylist,
                     'logout': logout
                     }))
class OAuthHandler(webapp.RequestHandler):
  # Handles the OAuth2 redirect back from the provider: exchanges the
  # authorization response for credentials, stores them, and returns the
  # user to the main page.
  @login_required
  def get(self):
    user = users.get_current_user()
    # memcache entries can be evicted at any time; check for None BEFORE
    # unpickling.  The original called pickle.loads(memcache.get(...))
    # directly, which raises TypeError on a missing entry.
    pickled_flow = memcache.get(user.user_id())
    if pickled_flow:
      flow = pickle.loads(pickled_flow)
      credentials = flow.step2_exchange(self.request.params)
      StorageByKeyName(
          Credentials, user.user_id(), 'credentials').put(credentials)
      self.redirect("/")
    else:
      # No pending flow for this user (cache evicted or direct visit);
      # nothing to exchange.
      pass
def main():
  """Configure URL routes and start the WSGI application."""
  routes = [
      ('/', MainHandler),
      ('/auth_return', OAuthHandler),
  ]
  application = webapp.WSGIApplication(routes, debug=True)
  util.run_wsgi_app(application)


if __name__ == '__main__':
  main()
| [
[
14,
0,
0.1651,
0.0092,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1927,
0.0092,
0,
0.66,
0.05,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2018,
0.0092,
0,
0.6... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from oauth2client.appengine import CredentialsProperty",
"from oauth2client.appengine import StorageByKeyName",
"from oauth2client.clien... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator's constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Monotonically increasing counter used to hand out insertion indices.
  validators_count = 0

  def __init__(self, checker, message):
    """Create a validator.

    Args:
      checker: callable that verifies the constraint; what it receives
        depends on the concrete subclass (see SimpleValidator and
        DictionaryValidator).
      message: string, error message shown to the user on failure.
    """
    self.checker = checker
    self.message = message
    # Record registration order so validators are asserted in the order
    # they were registered (CL/18694236).
    Validator.validators_count += 1
    self.insertion_index = Validator.validators_count

  def Verify(self, flag_values):
    """Raise Error unless this validator's constraint holds.

    The flags library calls this method to enforce the constraint.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Raises:
      Error: if the constraint is not satisfied.
    """
    checker_input = self._GetInputToCheckerFunction(flag_values)
    if self.checker(checker_input):
      return
    raise Error(self.message)

  def GetFlagsNames(self):
    """Return the list of flag names this validator checks."""
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    """Return a printable description of the checked flags and values."""
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Build the checker's input from flag_values; subclass-specific."""
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Checks a single flag against a user-supplied checker function.  The
  checker receives the flag's value and returns True when the value is
  acceptable; otherwise it either returns False or raises an Exception.
  """

  def __init__(self, flag_name, checker, message):
    """Create a single-flag validator.

    Args:
      flag_name: string, name of the flag to check.
      checker: callable taking the flag's value (string, boolean, etc.)
        and returning True when the constraint is satisfied; otherwise
        it returns False or raises Error.
      message: string, error message shown to the user if the
        validator's condition is not satisfied.
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def GetFlagsNames(self):
    """Return the single checked flag name, wrapped in a list."""
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    """Return 'flag --name=value' for the checked flag."""
    current_value = flag_values[self.flag_name].value
    return 'flag --%s=%s' % (self.flag_name, current_value)

  def _GetInputToCheckerFunction(self, flag_values):
    """Return the current value of the checked flag.

    Args:
      flag_values: flags.FlagValues

    Returns:
      Value of the corresponding flag.
    """
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Checks several flags at once with a common checker function.  The
  checker receives a dictionary of flag values and returns True when
  they are acceptable; otherwise it either returns False or raises an
  Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Create a multi-flag validator.

    Args:
      flag_names: [string], names of the flags used by the checker.
      checker: callable taking a dictionary that maps each name in
        flag_names to that flag's value (string, boolean, etc.); must
        return True when the constraint is satisfied, otherwise return
        False or raise Error.
      message: string, error message shown to the user if the
        validator's condition is not satisfied.
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Return a dict mapping each name in self.flag_names to its value.

    Args:
      flag_values: flags.FlagValues
    """
    return dict((name, flag_values[name].value) for name in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    """Return 'flags name=value, ...' for all checked flags."""
    rendered = ['%s=%s' % (name, flag_values[name].value)
                for name in self.flag_names]
    return 'flags ' + ', '.join(rendered)

  def GetFlagsNames(self):
    """Return the list of checked flag names."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
class Credentials(db.Model):
  # Datastore model holding one user's OAuth2 credentials; rows are
  # accessed via StorageByKeyName keyed by the App Engine user id.
  credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
  # Entry point: lists Buzz activities for the signed-in user, first
  # running the OAuth2 authorization flow when no valid credentials are
  # stored.
  @login_required
  def get(self):
    user = users.get_current_user()
    # Look up previously stored credentials for this App Engine user.
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
      # No usable credentials: build the OAuth2 flow and redirect the
      # user to the authorization page.
      flow = OAuth2WebServerFlow(
          # Visit https://code.google.com/apis/console to
          # generate your client_id, client_secret and to
          # register your redirect_uri.
          client_id='<YOUR CLIENT ID HERE>',
          client_secret='<YOUR CLIENT SECRET HERE>',
          scope='https://www.googleapis.com/auth/buzz',
          user_agent='buzz-cmdline-sample/1.0',
          domain='anonymous',
          xoauth_displayname='Google App Engine Example App')
      callback = self.request.relative_url('/auth_return')
      authorize_url = flow.step1_get_authorize_url(callback)
      # Park the flow in memcache so OAuthHandler can finish the exchange.
      memcache.set(user.user_id(), pickle.dumps(flow))
      self.redirect(authorize_url)
    else:
      http = httplib2.Http()
      http = credentials.authorize(http)
      service = build("buzz", "v1", http=http)
      activities = service.activities()
      # NOTE(review): '@consumption' presumably selects activities the
      # user consumes — verify against the Buzz API reference.
      activitylist = activities.list(scope='@consumption',
                                     userId='@me').execute()
      path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      logout = users.create_logout_url('/')
      self.response.out.write(
          template.render(
              path, {'activitylist': activitylist,
                     'logout': logout
                     }))
class OAuthHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
flow = pickle.loads(memcache.get(user.user_id()))
if flow:
credentials = flow.step2_exchange(self.request.params)
StorageByKeyName(
Credentials, user.user_id(), 'credentials').put(credentials)
self.redirect("/")
else:
pass
def main():
application = webapp.WSGIApplication(
[
('/', MainHandler),
('/auth_return', OAuthHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| [
[
14,
0,
0.1651,
0.0092,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1927,
0.0092,
0,
0.66,
0.05,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2018,
0.0092,
0,
0.6... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from oauth2client.appengine import CredentialsProperty",
"from oauth2client.appengine import StorageByKeyName",
"from oauth2client.clien... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
"""Thrown If validator constraint is not satisfied."""
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use flags.Register...
methods instead.
"""
# Used to assign each validator an unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SimpleValidator and
DictionaryValidator for a detailed description.
message: string, error message to be shown to the user
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to assert validators in the order they were registered (CL/18694236)
self.insertion_index = Validator.validators_count
def Verify(self, flag_values):
"""Verify that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: flags.FlagValues, containing all flags
Raises:
Error: if constraint is not satisfied.
"""
param = self._GetInputToCheckerFunction(flag_values)
if not self.checker(param):
raise Error(self.message)
def GetFlagsNames(self):
"""Return the names of the flags checked by this validator.
Returns:
[string], names of the flags
"""
raise NotImplementedError('This method should be overloaded')
def PrintFlagsWithValues(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues, containing all flags.
Returns:
Return type depends on the specific validator.
"""
raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
"""Validator behind RegisterValidator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception."""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(SimpleValidator, self).__init__(checker, message)
self.flag_name = flag_name
def GetFlagsNames(self):
return [self.flag_name]
def PrintFlagsWithValues(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues
Returns:
value of the corresponding flag.
"""
return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
"""Validator behind RegisterDictionaryValidator method.
Validates that flag values pass their common checker function. The checker
function takes flag values and returns True (if values look fine) or,
if values are not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_names, checker, message):
"""Constructor.
Args:
flag_names: [string], containing names of the flags used by checker.
checker: function to verify the validator.
input - dictionary, with keys() being flag_names, and value for each
key being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(DictionaryValidator, self).__init__(checker, message)
self.flag_names = flag_names
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues
Returns:
dictionary, with keys() being self.lag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
"""
return dict([key, flag_values[key].value] for key in self.flag_names)
def PrintFlagsWithValues(self, flag_values):
prefix = 'flags '
flags_with_values = []
for key in self.flag_names:
flags_with_values.append('%s=%s' % (key, flag_values[key].value))
return prefix + ', '.join(flags_with_values)
def GetFlagsNames(self):
return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
import pickle
import base64
from django.contrib import admin
from django.contrib.auth.models import User
from django.db import models
from oauth2client.django_orm import FlowField
from oauth2client.django_orm import CredentialsField
# The Flow could also be stored in memcache since it is short lived.
class FlowModel(models.Model):
id = models.ForeignKey(User, primary_key=True)
flow = FlowField()
class CredentialsModel(models.Model):
id = models.ForeignKey(User, primary_key=True)
credential = CredentialsField()
class CredentialsAdmin(admin.ModelAdmin):
pass
class FlowAdmin(admin.ModelAdmin):
pass
admin.site.register(CredentialsModel, CredentialsAdmin)
admin.site.register(FlowModel, FlowAdmin)
| [
[
1,
0,
0.0303,
0.0303,
0,
0.66,
0,
848,
0,
1,
0,
0,
848,
0,
0
],
[
1,
0,
0.0606,
0.0303,
0,
0.66,
0.0833,
177,
0,
1,
0,
0,
177,
0,
0
],
[
1,
0,
0.1212,
0.0303,
0,
... | [
"import pickle",
"import base64",
"from django.contrib import admin",
"from django.contrib.auth.models import User",
"from django.db import models",
"from oauth2client.django_orm import FlowField",
"from oauth2client.django_orm import CredentialsField",
"class FlowModel(models.Model):\n id = models.F... |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.failUnlessEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| [
[
8,
0,
0.1458,
0.25,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3333,
0.0417,
0,
0.66,
0.3333,
944,
0,
1,
0,
0,
944,
0,
0
],
[
3,
0,
0.5833,
0.2917,
0,
0.66,
... | [
"\"\"\"\nThis file demonstrates two different styles of tests (one doctest and one\nunittest). These will both pass when you run \"manage.py test\".\n\nReplace these with more appropriate tests for your application.\n\"\"\"",
"from django.test import TestCase",
"class SimpleTest(TestCase):\n\n def test_basic... |
import os
import logging
import httplib2
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from oauth2client.django_orm import Storage
from oauth2client.client import OAuth2WebServerFlow
from django_sample.buzz.models import CredentialsModel
from django_sample.buzz.models import FlowModel
from apiclient.discovery import build
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
STEP2_URI = 'http://localhost:8000/auth_return'
@login_required
def index(request):
storage = Storage(CredentialsModel, 'id', request.user, 'credential')
credential = storage.get()
if credential is None or credential.invalid == True:
flow = OAuth2WebServerFlow(
client_id='837647042410.apps.googleusercontent.com',
client_secret='+SWwMCL9d8gWtzPRa1lXw5R8',
scope='https://www.googleapis.com/auth/buzz',
user_agent='buzz-django-sample/1.0',
)
authorize_url = flow.step1_get_authorize_url(STEP2_URI)
f = FlowModel(id=request.user, flow=flow)
f.save()
return HttpResponseRedirect(authorize_url)
else:
http = httplib2.Http()
http = credential.authorize(http)
service = build("buzz", "v1", http=http)
activities = service.activities()
activitylist = activities.list(scope='@consumption',
userId='@me').execute()
logging.info(activitylist)
return render_to_response('buzz/welcome.html', {
'activitylist': activitylist,
})
@login_required
def auth_return(request):
try:
f = FlowModel.objects.get(id=request.user)
credential = f.flow.step2_exchange(request.REQUEST)
storage = Storage(CredentialsModel, 'id', request.user, 'credential')
storage.put(credential)
f.delete()
return HttpResponseRedirect("/")
except FlowModel.DoesNotExist:
pass
| [
[
1,
0,
0.0164,
0.0164,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0328,
0.0164,
0,
0.66,
0.0667,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0492,
0.0164,
0,
... | [
"import os",
"import logging",
"import httplib2",
"from django.http import HttpResponse",
"from django.core.urlresolvers import reverse",
"from django.contrib.auth.decorators import login_required",
"from oauth2client.django_orm import Storage",
"from oauth2client.client import OAuth2WebServerFlow",
... |
#!/usr/bin/python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("""Error: Can't find the file 'settings.py' in the
directory containing %r. It appears you've customized things. You'll
have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError
somehow.)\n""" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1333,
0.0667,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.6667,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.2667,
0.0667,
1,
0.39,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"\"\"Error: Can't find the file 'settings.py' in the\ndirectory containing %r. It appears you've customized things. You'll\nhave to ru... |
#!/usr/bin/python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("""Error: Can't find the file 'settings.py' in the
directory containing %r. It appears you've customized things. You'll
have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError
somehow.)\n""" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1333,
0.0667,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.6667,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.2667,
0.0667,
1,
0.31,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"\"\"Error: Can't find the file 'settings.py' in the\ndirectory containing %r. It appears you've customized things. You'll\nhave to ru... |
import os
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
(r'^$', 'django_sample.buzz.views.index'),
(r'^auth_return', 'django_sample.buzz.views.auth_return'),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/', include(admin.site.urls)),
(r'^accounts/login/$', 'django.contrib.auth.views.login',
{'template_name': 'buzz/login.html'}),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': os.path.join(os.path.dirname(__file__), 'static')
}),
)
| [
[
1,
0,
0.04,
0.04,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.08,
0.04,
0,
0.66,
0.25,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.2,
0.04,
0,
0.66,
0.5,
... | [
"import os",
"from django.conf.urls.defaults import *",
"from django.contrib import admin",
"admin.autodiscover()",
"urlpatterns = patterns('',\n # Example:\n (r'^$', 'django_sample.buzz.views.index'),\n (r'^auth_return', 'django_sample.buzz.views.auth_return'),\n\n # Uncomment the admin/doc lin... |
# Django settings for django_sample project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'database.sqlite3'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_=9hq-$t_uv1ckf&s!y2$9g$1dm*6p1cl%*!^mg=7gr)!zj32d'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'django_sample.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates"
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates')
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django_sample.buzz'
)
| [
[
1,
0,
0.0241,
0.012,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
14,
0,
0.0482,
0.012,
0,
0.66,
0.0435,
309,
1,
0,
0,
0,
0,
4,
0
],
[
14,
0,
0.0602,
0.012,
0,
0.6... | [
"import os",
"DEBUG = True",
"TEMPLATE_DEBUG = DEBUG",
"ADMINS = (\n # ('Your Name', 'your_email@domain.com'),\n)",
"MANAGERS = ADMINS",
"DATABASE_ENGINE = 'sqlite3'",
"DATABASE_NAME = 'database.sqlite3'",
"DATABASE_USER = ''",
"DATABASE_PASSWORD = ''",
"DATABASE_HOST = ''",
"DATABASE_PORT = ... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Latitude.
Command-line application that sets the users
current location.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import httplib2
import pickle
from apiclient.discovery import build
from apiclient.oauth import FlowThreeLegged
from apiclient.ext.authtools import run
from apiclient.ext.file import Storage
# Uncomment to get detailed logging
# httplib2.debuglevel = 4
def main():
storage = Storage('latitude.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
auth_discovery = build("latitude", "v1").auth_discovery()
flow = FlowThreeLegged(auth_discovery,
# You MUST have a consumer key and secret tied to a
# registered domain to use the latitude API.
#
# https://www.google.com/accounts/ManageDomains
consumer_key='REGISTERED DOMAIN NAME',
consumer_secret='KEY GIVEN DURING REGISTRATION',
user_agent='google-api-client-python-latitude/1.0',
domain='REGISTERED DOMAIN NAME',
scope='https://www.googleapis.com/auth/latitude',
xoauth_displayname='Google API Latitude Example',
location='current',
granularity='city'
)
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("latitude", "v1", http=http)
body = {
"data": {
"kind": "latitude#location",
"latitude": 37.420352,
"longitude": -122.083389,
"accuracy": 130,
"altitude": 35
}
}
print service.currentLocation().insert(body=body).execute()
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1176,
0.0735,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1765,
0.0147,
0,
0.66,
0.1,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2206,
0.0147,
0,
0.66,
... | [
"\"\"\"Simple command-line example for Latitude.\n\nCommand-line application that sets the users\ncurrent location.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"from apiclient.discovery import build",
"import httplib2",
"import pickle",
"from apiclient.discovery import build",
"from... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Latitude.
Command-line application that sets the users
current location.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import httplib2
import pickle
from apiclient.discovery import build
from apiclient.oauth import FlowThreeLegged
from apiclient.ext.authtools import run
from apiclient.ext.file import Storage
# Uncomment to get detailed logging
# httplib2.debuglevel = 4
def main():
storage = Storage('latitude.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
auth_discovery = build("latitude", "v1").auth_discovery()
flow = FlowThreeLegged(auth_discovery,
# You MUST have a consumer key and secret tied to a
# registered domain to use the latitude API.
#
# https://www.google.com/accounts/ManageDomains
consumer_key='REGISTERED DOMAIN NAME',
consumer_secret='KEY GIVEN DURING REGISTRATION',
user_agent='google-api-client-python-latitude/1.0',
domain='REGISTERED DOMAIN NAME',
scope='https://www.googleapis.com/auth/latitude',
xoauth_displayname='Google API Latitude Example',
location='current',
granularity='city'
)
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("latitude", "v1", http=http)
body = {
"data": {
"kind": "latitude#location",
"latitude": 37.420352,
"longitude": -122.083389,
"accuracy": 130,
"altitude": 35
}
}
print service.currentLocation().insert(body=body).execute()
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1176,
0.0735,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1765,
0.0147,
0,
0.66,
0.1,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2206,
0.0147,
0,
0.66,
... | [
"\"\"\"Simple command-line example for Latitude.\n\nCommand-line application that sets the users\ncurrent location.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"from apiclient.discovery import build",
"import httplib2",
"import pickle",
"from apiclient.discovery import build",
"from... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from apiclient.ext.appengine import FlowThreeLeggedProperty
from apiclient.ext.appengine import OAuthCredentialsProperty
from apiclient.ext.appengine import StorageByKeyName
from apiclient.oauth import FlowThreeLegged
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
APP_ID = os.environ['APPLICATION_ID']
class Credentials(db.Model):
credentials = OAuthCredentialsProperty()
class MainHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
storage = StorageByKeyName(Credentials, user.user_id(), 'credentials')
http = httplib2.Http()
credentials = storage.get()
if credentials:
http = credentials.authorize(http)
service = build("buzz", "v1", http=http)
if not credentials:
return begin_oauth_flow(self, user, service)
followers = service.people().list(
userId='@me', groupId='@followers').execute()
self.response.out.write('Hello, you have %s followers!' %
followers['totalResults'])
def begin_oauth_flow(request_handler, user, service):
flow = FlowThreeLegged(service.auth_discovery(),
consumer_key='anonymous',
consumer_secret='anonymous',
user_agent='%s/1.0' % APP_ID,
domain='anonymous',
scope='https://www.googleapis.com/auth/buzz',
xoauth_displayname='App Name')
callback = self.request.relative_url('/auth_return')
authorize_url = flow.step1_get_authorize_url(callback)
memcache.set(user.user_id(), pickle.dumps(flow))
request_handler.redirect(authorize_url)
class OAuthHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
storage = StorageByKeyName(Credentials, user.user_id(), 'credentials')
flow = pickle.loads(memcache.get(user.user_id()))
credentials = flow.step2_exchange(self.request.params)
storage.put(credentials)
self.redirect("/")
def main():
application = webapp.WSGIApplication(
[
('/', MainHandler),
('/auth_return', OAuthHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| [
[
14,
0,
0.1698,
0.0094,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1981,
0.0094,
0,
0.66,
0.0455,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2075,
0.0094,
0,
0... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from apiclient.ext.appengine import FlowThreeLeggedProperty",
"from apiclient.ext.appengine import OAuthCredentialsProperty",
"from apic... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
"""Thrown If validator constraint is not satisfied."""
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use flags.Register...
methods instead.
"""
# Used to assign each validator an unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SimpleValidator and
DictionaryValidator for a detailed description.
message: string, error message to be shown to the user
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to assert validators in the order they were registered (CL/18694236)
self.insertion_index = Validator.validators_count
def Verify(self, flag_values):
"""Verify that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: flags.FlagValues, containing all flags
Raises:
Error: if constraint is not satisfied.
"""
param = self._GetInputToCheckerFunction(flag_values)
if not self.checker(param):
raise Error(self.message)
def GetFlagsNames(self):
"""Return the names of the flags checked by this validator.
Returns:
[string], names of the flags
"""
raise NotImplementedError('This method should be overloaded')
def PrintFlagsWithValues(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues, containing all flags.
Returns:
Return type depends on the specific validator.
"""
raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
"""Validator behind RegisterValidator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception."""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(SimpleValidator, self).__init__(checker, message)
self.flag_name = flag_name
def GetFlagsNames(self):
return [self.flag_name]
def PrintFlagsWithValues(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues
Returns:
value of the corresponding flag.
"""
return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.
  Validates that flag values pass their common checker function. The checker
  function takes flag values and returns True (if values look fine) or,
  if values are not valid, either returns False or raises an Exception.
  """
  def __init__(self, flag_names, checker, message):
    """Constructor.
    Args:
      flag_names: [string], containing names of the flags used by checker.
      checker: function to verify the validator.
        input - dictionary, with keys() being flag_names, and value for each
          key being the value of the corresponding flag (string, boolean, etc).
        output - Boolean. Must return True if validator constraint is satisfied.
          If constraint is not satisfied, it should either return False or
          raise Error.
      message: string, error message to be shown to the user if validator's
        condition is not satisfied
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names
  def _GetInputToCheckerFunction(self, flag_values):
    """Given flag values, construct the input to be given to checker.
    Args:
      flag_values: flags.FlagValues
    Returns:
      dictionary, with keys() being self.flag_names, and value for each key
      being the value of the corresponding flag (string, boolean, etc).
    """
    # Feed dict() (key, value) tuples rather than two-element lists; this is
    # the conventional pair shape and avoids allocating throwaway lists.
    return dict((key, flag_values[key].value) for key in self.flag_names)
  def PrintFlagsWithValues(self, flag_values):
    """Render all checked flags and their values as 'flags a=1, b=2'."""
    prefix = 'flags '
    flags_with_values = []
    for key in self.flag_names:
      flags_with_values.append('%s=%s' % (key, flag_values[key].value))
    return prefix + ', '.join(flags_with_values)
  def GetFlagsNames(self):
    """Return the names of all flags checked by this validator."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from apiclient.ext.appengine import FlowThreeLeggedProperty
from apiclient.ext.appengine import OAuthCredentialsProperty
from apiclient.ext.appengine import StorageByKeyName
from apiclient.oauth import FlowThreeLegged
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
APP_ID = os.environ['APPLICATION_ID']
class Credentials(db.Model):
  # Datastore model holding a user's OAuth credentials; rows are read and
  # written through StorageByKeyName keyed on the App Engine user id.
  credentials = OAuthCredentialsProperty()
class MainHandler(webapp.RequestHandler):
  """Serves '/': shows the follower count, or starts the OAuth dance."""
  @login_required
  def get(self):
    """Handle GET: load stored credentials and query the Buzz API."""
    current_user = users.get_current_user()
    storage = StorageByKeyName(Credentials, current_user.user_id(),
                               'credentials')
    credentials = storage.get()
    http = httplib2.Http()
    if credentials:
      http = credentials.authorize(http)
    service = build("buzz", "v1", http=http)
    if not credentials:
      return begin_oauth_flow(self, current_user, service)
    followers = service.people().list(
        userId='@me', groupId='@followers').execute()
    self.response.out.write('Hello, you have %s followers!' %
                            followers['totalResults'])
def begin_oauth_flow(request_handler, user, service):
  """Start the three-legged OAuth dance and redirect the user to authorize.
  Args:
    request_handler: webapp.RequestHandler serving the current request.
    user: the App Engine user; keys the pickled flow stored in memcache.
    service: built API client whose auth discovery document seeds the flow.
  """
  flow = FlowThreeLegged(service.auth_discovery(),
                         consumer_key='anonymous',
                         consumer_secret='anonymous',
                         user_agent='%s/1.0' % APP_ID,
                         domain='anonymous',
                         scope='https://www.googleapis.com/auth/buzz',
                         xoauth_displayname='App Name')
  # BUG FIX: this is a module-level function, so there is no 'self' in
  # scope; the handler is the 'request_handler' parameter.
  callback = request_handler.request.relative_url('/auth_return')
  authorize_url = flow.step1_get_authorize_url(callback)
  # Stash the flow so OAuthHandler can finish the exchange on callback.
  memcache.set(user.user_id(), pickle.dumps(flow))
  request_handler.redirect(authorize_url)
class OAuthHandler(webapp.RequestHandler):
  """Serves '/auth_return': completes the OAuth dance and stores creds."""
  @login_required
  def get(self):
    """Exchange the request token for credentials and persist them."""
    current_user = users.get_current_user()
    # Recover the flow that begin_oauth_flow stashed in memcache.
    flow = pickle.loads(memcache.get(current_user.user_id()))
    credentials = flow.step2_exchange(self.request.params)
    StorageByKeyName(Credentials, current_user.user_id(),
                     'credentials').put(credentials)
    self.redirect("/")
def main():
  """Wire up URL routes and run the WSGI application."""
  routes = [
      ('/', MainHandler),
      ('/auth_return', OAuthHandler),
  ]
  util.run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
if __name__ == '__main__':
  main()
| [
[
14,
0,
0.1698,
0.0094,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1981,
0.0094,
0,
0.66,
0.0455,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2075,
0.0094,
0,
0... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from apiclient.ext.appengine import FlowThreeLeggedProperty",
"from apiclient.ext.appengine import OAuthCredentialsProperty",
"from apic... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Thrown if a validator constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.
  Users should NOT overload these classes; use the flags.Register...
  methods instead.
  """
  # Monotonically increasing counter; gives every validator a unique
  # insertion_index.
  validators_count = 0
  def __init__(self, checker, message):
    """Create a validator (common part of all validator constructors).
    Args:
      checker: function verifying the constraint. Its input varies by
        subclass; see SimpleValidator and DictionaryValidator.
      message: string, error message shown to the user on failure.
    """
    self.checker = checker
    self.message = message
    # Remember registration order so validators can be asserted in the
    # order they were registered (CL/18694236).
    Validator.validators_count += 1
    self.insertion_index = Validator.validators_count
  def Verify(self, flag_values):
    """Check the constraint; called by the flags library.
    Args:
      flag_values: flags.FlagValues, containing all flags.
    Raises:
      Error: if the constraint is not satisfied.
    """
    checker_input = self._GetInputToCheckerFunction(flag_values)
    if not self.checker(checker_input):
      raise Error(self.message)
  def GetFlagsNames(self):
    """Return [string], the names of the flags this validator checks."""
    raise NotImplementedError('This method should be overloaded')
  def PrintFlagsWithValues(self, flag_values):
    """Render the checked flags and their current values as a string."""
    raise NotImplementedError('This method should be overloaded')
  def _GetInputToCheckerFunction(self, flag_values):
    """Build the checker input from flag_values; subclass-specific type."""
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.
  Checks that a single flag satisfies its checker function. The checker
  receives the flag value and returns True when the value is acceptable;
  otherwise it returns False or raises an Exception."""
  def __init__(self, flag_name, checker, message):
    """Create a validator tied to a single flag.
    Args:
      flag_name: string, name of the flag this validator checks.
      checker: callable receiving the flag's value (string, boolean, etc.)
        and returning True when the constraint holds; otherwise it returns
        False or raises Error.
      message: string, error message shown to the user when the validator's
        condition is not satisfied.
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name
  def GetFlagsNames(self):
    """Return the single checked flag name, wrapped in a list."""
    return [self.flag_name]
  def PrintFlagsWithValues(self, flag_values):
    """Render the flag and its current value as 'flag --name=value'."""
    flag = flag_values[self.flag_name]
    return 'flag --%s=%s' % (self.flag_name, flag.value)
  def _GetInputToCheckerFunction(self, flag_values):
    """Look up the checker input: the current value of the checked flag.
    Args:
      flag_values: flags.FlagValues
    Returns:
      The value of the flag named self.flag_name.
    """
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.
  Validates that flag values pass their common checker function. The checker
  function takes flag values and returns True (if values look fine) or,
  if values are not valid, either returns False or raises an Exception.
  """
  def __init__(self, flag_names, checker, message):
    """Constructor.
    Args:
      flag_names: [string], containing names of the flags used by checker.
      checker: function to verify the validator.
        input - dictionary, with keys() being flag_names, and value for each
          key being the value of the corresponding flag (string, boolean, etc).
        output - Boolean. Must return True if validator constraint is satisfied.
          If constraint is not satisfied, it should either return False or
          raise Error.
      message: string, error message to be shown to the user if validator's
        condition is not satisfied
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names
  def _GetInputToCheckerFunction(self, flag_values):
    """Given flag values, construct the input to be given to checker.
    Args:
      flag_values: flags.FlagValues
    Returns:
      dictionary, with keys() being self.flag_names, and value for each key
      being the value of the corresponding flag (string, boolean, etc).
    """
    # Feed dict() (key, value) tuples rather than two-element lists; this is
    # the conventional pair shape and avoids allocating throwaway lists.
    return dict((key, flag_values[key].value) for key in self.flag_names)
  def PrintFlagsWithValues(self, flag_values):
    """Render all checked flags and their values as 'flags a=1, b=2'."""
    prefix = 'flags '
    flags_with_values = []
    for key in self.flag_names:
      flags_with_values.append('%s=%s' % (key, flag_values[key].value))
    return prefix + ', '.join(flags_with_values)
  def GetFlagsNames(self):
    """Return the names of all flags checked by this validator."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
"""Get and print a histogram of the top 15 brand distribution for a search
query.
Histograms are created by using the "Facets" functionality of the API. A
Facet is a view of a certain property of products, containing a number of
buckets, one for each value of that property. Or concretely, for a parameter
such as "brand" of a product, the facets would include a facet for brand,
which would contain a number of buckets, one for each brand returned in the
result.
A bucket contains either a value and a count, or a value and a range. In the
simple case of a value and a count for our example of the "brand" property,
the value would be the brand name, eg "sony" and the count would be the
number of results in the search.
"""
client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
resource = client.products()
request = resource.list(source='public', country='US', q=u'digital camera',
facets_include='brand:15', facets_enabled=True)
response = request.execute()
# Pick the first and only facet for this query
facet = response['facets'][0]
print '\n\tHistogram for "%s":\n' % facet['property']
labels = []
values = []
for bucket in facet['buckets']:
labels.append(bucket['value'].rjust(20))
values.append(bucket['count'])
weighting = 50.0 / max(values)
for label, value in zip(labels, values):
print label, '#' * int(weighting * value), '(%s)' % value
print
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1111,
0.0185,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1481,
0.0185,
0,
0.66,
0.25,
78,
0,
1,
0,
0,
78,
0,
0
],
[
14,
0,
0.2037,
0.0185,
0,
0.66,
... | [
"\"\"\"Query with ranked results against the shopping search API\"\"\"",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a histogram of the top 15 brand distribution for a search\n query... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Full text search query against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print every public product matching the query "digital camera".
  The free-text query is supplied through the 'q' parameter of the list
  method. Alternatives can be expressed with "|" (e.g. q='banana|apple'),
  and multi-word phrases by double-quoting them (e.g. q='"mp3 player"'),
  which combines usefully with "|" as in q='"mp3 player"|ipod'.
  """
  shopping = build('shopping', SHOPPING_API_VERSION,
                   developerKey=DEVELOPER_KEY)
  # 'q' carries the full-text search query.
  response = shopping.products().list(source='public', country='US',
                                      q=u'digital camera').execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.15,
0.025,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2,
0.025,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.25,
0.025,
0,
0.66,
0.33... | [
"\"\"\"Full text search query against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of all public products matching the search ... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Basic query against the public shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Fetch and pretty-print the feed of all public US products.
  Note: the 'source' and 'country' arguments are required by the list
  method.
  """
  shopping = build('shopping', SHOPPING_API_VERSION,
                   developerKey=DEVELOPER_KEY)
  # Build and execute the request in one chained expression.
  response = shopping.products().list(source='public',
                                      country='US').execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1875,
0.0312,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.25,
0.0312,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.3125,
0.0312,
0,
0.66,
... | [
"\"\"\"Basic query against the public shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of all public products available in the\n Unit... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query that is restricted by a parameter against the public shopping search
API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print public US products matching "Digital Camera" made by Canon.
  The "restrictBy" parameter filters the results returned. Several values
  for one attribute are joined with "|", as in
  restrictBy='brand:canon|sony|apple', and independent restrictions are
  joined with a comma, as in restrictBy='brand:sony,title:32GB'.
  """
  shopping = build('shopping', SHOPPING_API_VERSION,
                   developerKey=DEVELOPER_KEY)
  response = shopping.products().list(source='public', country='US',
                                      restrictBy='brand:canon',
                                      q='Digital Camera').execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1477,
0.0455,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2045,
0.0227,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.25,
0.0227,
0,
0.66,
... | [
"\"\"\"Query that is restricted by a parameter against the public shopping search\nAPI\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of all public prod... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Get and print a histogram of the top 15 brand distribution for a search
  query.
  Histograms are created by using the "Facets" functionality of the API. A
  Facet is a view of a certain property of products, containing a number of
  buckets, one for each value of that property. Or concretely, for a parameter
  such as "brand" of a product, the facets would include a facet for brand,
  which would contain a number of buckets, one for each brand returned in the
  result.
  A bucket contains either a value and a count, or a value and a range. In the
  simple case of a value and a count for our example of the "brand" property,
  the value would be the brand name, eg "sony" and the count would be the
  number of results in the search.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # facets_enabled turns facet computation on; facets_include asks for the
  # top 15 'brand' buckets.
  request = resource.list(source='public', country='US', q=u'digital camera',
                          facets_include='brand:15', facets_enabled=True)
  response = request.execute()
  # Pick the first and only facet for this query
  facet = response['facets'][0]
  print '\n\tHistogram for "%s":\n' % facet['property']
  # Collect right-aligned labels and raw counts for each brand bucket.
  labels = []
  values = []
  for bucket in facet['buckets']:
    labels.append(bucket['value'].rjust(20))
    values.append(bucket['count'])
  # Scale so the longest bar is 50 '#' characters wide.
  weighting = 50.0 / max(values)
  for label, value in zip(labels, values):
    print label, '#' * int(weighting * value), '(%s)' % value
  print
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1111,
0.0185,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1481,
0.0185,
0,
0.66,
0.25,
78,
0,
1,
0,
0,
78,
0,
0
],
[
14,
0,
0.2037,
0.0185,
0,
0.66,
... | [
"\"\"\"Query with ranked results against the shopping search API\"\"\"",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a histogram of the top 15 brand distribution for a search\n query... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with grouping against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print public US products matching 'digital camera', grouped by the 8
  top brands.
  Grouping ("crowding") is requested with the "crowdBy" parameter of the
  list method, written as <attribute>:<occurrence>, where <occurrence> is
  how many buckets of that attribute to keep -- e.g. "brand:5" for the 5
  top brands. The attributes currently supported are account_id, brand,
  condition, gtin and price. Several rules are combined with commas, e.g.
  crowdBy="brand:5,condition:3".
  """
  shopping = build('shopping', SHOPPING_API_VERSION,
                   developerKey=DEVELOPER_KEY)
  # crowdBy='brand:8' groups the results under the 8 most frequent brands.
  response = shopping.products().list(source='public', country='US',
                                      q=u'digital camera',
                                      crowdBy='brand:8').execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.125,
0.0208,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1667,
0.0208,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.2083,
0.0208,
0,
0.66,... | [
"\"\"\"Query with grouping against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of public products in the United States mathin... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Queries with paginated results against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Interactively page through the entire public US product feed.
  Pagination is driven by the "startIndex" parameter of the list method;
  the first response supplies the page size and total result count.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # The first request tells us the page size and total item count (it is
  # also the first page of results).
  first = resource.list(source='public', country='US',
                        q=u'digital camera').execute()
  page_size = first['itemsPerPage']
  total = first['totalItems']
  for start in range(1, total, page_size):
    answer = raw_input('About to display results from %s to %s, y/(n)? ' %
                       (start, start + page_size))
    if answer.strip().lower().startswith('n'):
      break  # The user has had enough.
    # Fetch and display this window of results.
    page = resource.list(source='public', country='US',
                         q=u'digital camera', startIndex=start).execute()
    pprint.pprint(page)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1277,
0.0213,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1702,
0.0213,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.2128,
0.0213,
0,
0.66... | [
"\"\"\"Queries with paginated results against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a the entire paginated feed of public prod... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Full text search query against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Get and print a feed of all public products matching the search query
  "digital camera".
  This is achieved by using the q query parameter to the list method.
  The "|" operator can be used to search for alternative search terms, for
  example: q = 'banana|apple' will search for bananas or apples.
  Search phrases such as those containing spaces can be specified by
  surrounding them with double quotes, for example q='"mp3 player"'. This can
  be useful when combining with the "|" operator such as q = '"mp3
  player"|ipod'.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # Note the 'q' parameter, which will contain the value of the search query
  request = resource.list(source='public', country='US', q=u'digital camera')
  response = request.execute()
  # Pretty-print the raw API response for inspection.
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.15,
0.025,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2,
0.025,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.25,
0.025,
0,
0.66,
0.33... | [
"\"\"\"Full text search query against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of all public products matching the search ... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print public US products matching 'digital camera', cheapest first.
  Ranking is selected with the "rankBy" parameter of the list method. The
  API currently understands "relevancy" (the default when rankBy is
  omitted), "modificationTime:ascending", "modificationTime:descending",
  "price:ascending" and "price:descending"; these can be combined.
  """
  shopping = build('shopping', SHOPPING_API_VERSION,
                   developerKey=DEVELOPER_KEY)
  # rankBy orders the results -- here by ascending price.
  response = shopping.products().list(source='public', country='US',
                                      q=u'digital camera',
                                      rankBy='price:ascending').execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1304,
0.0217,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1739,
0.0217,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.2174,
0.0217,
0,
0.66... | [
"\"\"\"Query with ranked results against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of public products in the United States ... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Basic query against the public shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Get and print a feed of all public products available in the
  United States.
  Note: The source and country arguments are required to pass to the list
  method.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # 'source' and 'country' are the two mandatory list() arguments.
  request = resource.list(source='public', country='US')
  response = request.execute()
  # Pretty-print the raw API response for inspection.
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1875,
0.0312,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.25,
0.0312,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.3125,
0.0312,
0,
0.66,
... | [
"\"\"\"Basic query against the public shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of all public products available in the\n Unit... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query that is restricted by a parameter against the public shopping search
API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Get and print a feed of all public products matching the search query
  "digital camera", that are created by "Canon" available in the
  United States.
  The "restrictBy" parameter controls which types of results are returned.
  Multiple values for a single restrictBy can be separated by the "|" operator,
  so to look for all products created by Canon, Sony, or Apple:
  restrictBy = 'brand:canon|sony|apple'
  Multiple restricting parameters should be separated by a comma, so for
  products created by Sony with the word "32GB" in the title:
  restrictBy = 'brand:sony,title:32GB'
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # restrictBy narrows the text-search results to a single brand.
  request = resource.list(source='public', country='US',
                          restrictBy='brand:canon', q='Digital Camera')
  response = request.execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1477,
0.0455,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2045,
0.0227,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.25,
0.0227,
0,
0.66,
... | [
"\"\"\"Query that is restricted by a parameter against the public shopping search\nAPI\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of all public prod... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Queries with paginated results against the shopping search API"""
import pprint
from apiclient.discovery import build
SHOPPING_API_VERSION = 'v1'
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Get and print the entire paginated feed of public products in the United
  States.
  Pagination is controlled with the "startIndex" parameter passed to the list
  method of the resource.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # The first request contains the information we need for the total items, and
  # page size, as well as returning the first page of results.
  request = resource.list(source='public', country='US', q=u'digital camera')
  response = request.execute()
  itemsPerPage = response['itemsPerPage']
  totalItems = response['totalItems']
  # Step through the result set one page at a time, asking the user before
  # each page whether to continue.
  for i in range(1, totalItems, itemsPerPage):
    answer = raw_input('About to display results from %s to %s, y/(n)? ' %
                       (i, i + itemsPerPage))
    if answer.strip().lower().startswith('n'):
      # Stop if the user has had enough
      break
    else:
      # Fetch this series of results
      request = resource.list(source='public', country='US',
                              q=u'digital camera', startIndex=i)
      response = request.execute()
      pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1277,
0.0213,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1702,
0.0213,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.2128,
0.0213,
0,
0.66... | [
"\"\"\"Queries with paginated results against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a the entire paginated feed of public prod... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with grouping against the shopping search API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request.
SHOPPING_API_VERSION = 'v1'
# API key that identifies this application to Google (from the APIs console).
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Get and print a feed of public products in the United States matching a
  text search query for 'digital camera' and grouped by the 8 top brands.

  The list method of the resource should be called with the "crowdBy"
  parameter.  Each parameter should be designated as <attribute>:<occurrence>,
  where <occurrence> is the number of that <attribute> that will be used.  For
  example, to crowd by the 5 top brands, the parameter would be "brand:5".  The
  possible rules for crowding are currently:

    account_id:<occurrence>  (eg account_id:5)
    brand:<occurrence>       (eg brand:5)
    condition:<occurrence>   (eg condition:3)
    gtin:<occurrence>        (eg gtin:10)
    price:<occurrence>       (eg price:10)

  Multiple crowding rules should be specified by separating them with a comma,
  for example to crowd by the top 5 brands and then condition of those items,
  the parameter should be crowdBy="brand:5,condition:3".
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # The crowdBy parameter to the list method causes the results to be grouped,
  # in this case by the top 8 brands.
  request = resource.list(source='public', country='US', q=u'digital camera',
                          crowdBy='brand:8')
  response = request.execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.125,
0.0208,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1667,
0.0208,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.2083,
0.0208,
0,
0.66,... | [
"\"\"\"Query with grouping against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of public products in the United States mathin... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request.
SHOPPING_API_VERSION = 'v1'
# API key that identifies this application to Google (from the APIs console).
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Get and print a feed of public products in the United States matching a
  text search query for 'digital camera' ranked by ascending price.

  The list method for the resource should be called with the "rankBy"
  parameter.  5 parameters to rankBy are currently supported by the API.  They
  are:

    "relevancy"
    "modificationTime:ascending"
    "modificationTime:descending"
    "price:ascending"
    "price:descending"

  These parameters can be combined.
  The default ranking is "relevancy" if the rankBy parameter is omitted.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()
  # The rankBy parameter to the list method causes results to be ranked, in
  # this case by ascending price.
  request = resource.list(source='public', country='US', q=u'digital camera',
                          rankBy='price:ascending')
  response = request.execute()
  pprint.pprint(response)
if __name__ == '__main__':
  main()
| [
[
8,
0,
0.1304,
0.0217,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1739,
0.0217,
0,
0.66,
0.1667,
276,
0,
1,
0,
0,
276,
0,
0
],
[
1,
0,
0.2174,
0.0217,
0,
0.66... | [
"\"\"\"Query with ranked results against the shopping search API\"\"\"",
"import pprint",
"from apiclient.discovery import build",
"SHOPPING_API_VERSION = 'v1'",
"DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'",
"def main():\n \"\"\"Get and print a feed of public products in the United States ... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import inspect
import os
import pydoc
import re
from apiclient.discovery import build
from apiclient.anyjson import simplejson
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
  """Serves the front page: a list of all preferred, discoverable APIs."""

  def get(self):
    # memcache is passed as httplib2's cache, so repeated fetches of the
    # discovery directory are served from App Engine's memcache.
    http = httplib2.Http(memcache)
    resp, content = http.request('https://www.googleapis.com/discovery/v0.3/directory?preferred=true')
    directory = simplejson.loads(content)['items']
    path = os.path.join(os.path.dirname(__file__), 'index.html')
    self.response.out.write(
        template.render(
            path, {'directory': directory,
                   }))
def render(resource):
  """Build a pydoc HTML page documenting the class of *resource*."""
  target, target_name = pydoc.resolve(type(resource))
  title = pydoc.describe(target)
  body = pydoc.html.document(target, target_name)
  return pydoc.html.page(title, body)
class ResourceHandler(webapp.RequestHandler):
  """Serves a pydoc page for one (possibly nested) resource of an API.

  URL shape: /<service>/<version>[/<collection>[/<sub-collection>...]].
  """

  def get(self, service_name, version, collection):
    """Render pydoc HTML for the resource named by the request path.

    Args:
      service_name: string, name of the API (e.g. 'buzz').
      version: string, version of the API (e.g. 'v1').
      collection: string or None, '/'-separated chain of collection methods
          to descend into before rendering.
    """
    resource = build(service_name, version)
    # descend the object path
    if collection:
      path = collection.split('/')
      if path:
        for method in path:
          resource = getattr(resource, method)()
    page = render(resource)
    # Collect names of sub-collections: public callables tagged with
    # __is_resource__ by the discovery-built client.
    collections = []
    for name in dir(resource):
      if not "_" in name and callable(getattr(resource, name)) and hasattr(
          getattr(resource, name), '__is_resource__'):
        collections.append(name)
    if collection is None:
      collection_path = ''
    else:
      collection_path = collection + '/'
    # Rewrite each sub-collection name in the generated HTML into a link that
    # drills one level deeper into the resource tree.
    for name in collections:
      page = re.sub('strong>(%s)<' % name,
                    r'strong><a href="/%s/%s/%s">\1</a><' % (
                        service_name, version, collection_path + name), page)
    # TODO(jcgregorio) breadcrumbs
    # TODO(jcgregorio) sample code?
    # Only the first <p> is replaced (count=1), injecting a Home link on top.
    page = re.sub('<p>', r'<a href="/">Home</a><p>', page, 1)
    self.response.out.write(page)
def main():
  """Start the WSGI application that maps URLs to the handlers above."""
  application = webapp.WSGIApplication(
      [
        (r'/', MainHandler),
        # service / version / optional nested collection path
        (r'/([^\/]*)/([^\/]*)(?:/(.*))?', ResourceHandler),
      ],
      debug=True)
  util.run_wsgi_app(application)
if __name__ == '__main__':
  main()
| [
[
14,
0,
0.1837,
0.0102,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2143,
0.0102,
0,
0.66,
0.0625,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2245,
0.0102,
0,
0... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import inspect",
"import os",
"import pydoc",
"import re",
"from apiclient.discovery import build",
"from apiclient.anyjson import simplejson",
"from google.appengine.api import memcache",
"from google.appengine.ext import ... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Monotonically increasing counter used to hand out insertion indices.
  validators_count = 0

  def __init__(self, checker, message):
    """Constructor to create all validators.

    Args:
      checker: function to verify the constraint.
        Input of this method varies, see SimpleValidator and
        DictionaryValidator for a detailed description.
      message: string, error message to be shown to the user.
    """
    self.checker = checker
    self.message = message
    # Record registration order, so validators can be asserted in the order
    # they were registered (CL/18694236).
    Validator.validators_count += 1
    self.insertion_index = Validator.validators_count

  def Verify(self, flag_values):
    """Check that this validator's constraint is satisfied.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Raises:
      Error: if the constraint is not satisfied.
    """
    checker_input = self._GetInputToCheckerFunction(flag_values)
    if self.checker(checker_input):
      return
    raise Error(self.message)

  def GetFlagsNames(self):
    """Return [string], names of the flags checked by this validator."""
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    """Return a human-readable description of the checked flags and values."""
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Build the checker input from flag_values; type is subclass-specific."""
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Checks a single flag: the checker receives that flag's value and returns
  True when the value is acceptable; otherwise it either returns False or
  raises an Exception.
  """

  def __init__(self, flag_name, checker, message):
    """Constructor.

    Args:
      flag_name: string, name of the flag.
      checker: function taking the flag value (string, boolean, etc.) and
        returning True when the constraint is satisfied; may instead return
        False or raise Error.
      message: string, error message shown to the user on failure.
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def GetFlagsNames(self):
    """Return the single checked flag name, wrapped in a list."""
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    """Return a 'flag --name=value' description of the checked flag."""
    current_value = flag_values[self.flag_name].value
    return 'flag --%s=%s' % (self.flag_name, current_value)

  def _GetInputToCheckerFunction(self, flag_values):
    """Return the checked flag's value, as handed to the checker function."""
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Checks several flags together: the checker receives a dictionary mapping
  each flag name to its value and returns True when the combination is
  acceptable; otherwise it either returns False or raises an Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Constructor.

    Args:
      flag_names: [string], names of the flags used by checker.
      checker: function taking a dict (flag name -> flag value) and returning
        True when the constraint is satisfied; may instead return False or
        raise Error.
      message: string, error message shown to the user on failure.
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Return a dict mapping each name in self.flag_names to its value."""
    return dict((name, flag_values[name].value) for name in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    """Return a 'flags a=1, b=2' description of the checked flags."""
    described = ['%s=%s' % (name, flag_values[name].value)
                 for name in self.flag_names]
    return 'flags ' + ', '.join(described)

  def GetFlagsNames(self):
    """Return [string], the names of the checked flags."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import inspect
import os
import pydoc
import re
from apiclient.discovery import build
from apiclient.anyjson import simplejson
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
class MainHandler(webapp.RequestHandler):
  """Serves the front page: a list of all preferred, discoverable APIs."""

  def get(self):
    # memcache is passed as httplib2's cache, so repeated fetches of the
    # discovery directory are served from App Engine's memcache.
    http = httplib2.Http(memcache)
    resp, content = http.request('https://www.googleapis.com/discovery/v0.3/directory?preferred=true')
    directory = simplejson.loads(content)['items']
    path = os.path.join(os.path.dirname(__file__), 'index.html')
    self.response.out.write(
        template.render(
            path, {'directory': directory,
                   }))
def render(resource):
  """Build a pydoc HTML page documenting the class of *resource*."""
  target, target_name = pydoc.resolve(type(resource))
  title = pydoc.describe(target)
  body = pydoc.html.document(target, target_name)
  return pydoc.html.page(title, body)
class ResourceHandler(webapp.RequestHandler):
  """Serves a pydoc page for one (possibly nested) resource of an API.

  URL shape: /<service>/<version>[/<collection>[/<sub-collection>...]].
  """

  def get(self, service_name, version, collection):
    """Render pydoc HTML for the resource named by the request path.

    Args:
      service_name: string, name of the API (e.g. 'buzz').
      version: string, version of the API (e.g. 'v1').
      collection: string or None, '/'-separated chain of collection methods
          to descend into before rendering.
    """
    resource = build(service_name, version)
    # descend the object path
    if collection:
      path = collection.split('/')
      if path:
        for method in path:
          resource = getattr(resource, method)()
    page = render(resource)
    # Collect names of sub-collections: public callables tagged with
    # __is_resource__ by the discovery-built client.
    collections = []
    for name in dir(resource):
      if not "_" in name and callable(getattr(resource, name)) and hasattr(
          getattr(resource, name), '__is_resource__'):
        collections.append(name)
    if collection is None:
      collection_path = ''
    else:
      collection_path = collection + '/'
    # Rewrite each sub-collection name in the generated HTML into a link that
    # drills one level deeper into the resource tree.
    for name in collections:
      page = re.sub('strong>(%s)<' % name,
                    r'strong><a href="/%s/%s/%s">\1</a><' % (
                        service_name, version, collection_path + name), page)
    # TODO(jcgregorio) breadcrumbs
    # TODO(jcgregorio) sample code?
    # Only the first <p> is replaced (count=1), injecting a Home link on top.
    page = re.sub('<p>', r'<a href="/">Home</a><p>', page, 1)
    self.response.out.write(page)
def main():
  """Start the WSGI application that maps URLs to the handlers above."""
  application = webapp.WSGIApplication(
      [
        (r'/', MainHandler),
        # service / version / optional nested collection path
        (r'/([^\/]*)/([^\/]*)(?:/(.*))?', ResourceHandler),
      ],
      debug=True)
  util.run_wsgi_app(application)
if __name__ == '__main__':
  main()
| [
[
14,
0,
0.1837,
0.0102,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2143,
0.0102,
0,
0.66,
0.0625,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2245,
0.0102,
0,
0... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import inspect",
"import os",
"import pydoc",
"import re",
"from apiclient.discovery import build",
"from apiclient.anyjson import simplejson",
"from google.appengine.api import memcache",
"from google.appengine.ext import ... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Monotonically increasing counter used to hand out insertion indices.
  validators_count = 0

  def __init__(self, checker, message):
    """Constructor to create all validators.

    Args:
      checker: function to verify the constraint.
        Input of this method varies, see SimpleValidator and
        DictionaryValidator for a detailed description.
      message: string, error message to be shown to the user.
    """
    self.checker = checker
    self.message = message
    # Record registration order, so validators can be asserted in the order
    # they were registered (CL/18694236).
    Validator.validators_count += 1
    self.insertion_index = Validator.validators_count

  def Verify(self, flag_values):
    """Check that this validator's constraint is satisfied.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Raises:
      Error: if the constraint is not satisfied.
    """
    checker_input = self._GetInputToCheckerFunction(flag_values)
    if self.checker(checker_input):
      return
    raise Error(self.message)

  def GetFlagsNames(self):
    """Return [string], names of the flags checked by this validator."""
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    """Return a human-readable description of the checked flags and values."""
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Build the checker input from flag_values; type is subclass-specific."""
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Checks a single flag: the checker receives that flag's value and returns
  True when the value is acceptable; otherwise it either returns False or
  raises an Exception.
  """

  def __init__(self, flag_name, checker, message):
    """Constructor.

    Args:
      flag_name: string, name of the flag.
      checker: function taking the flag value (string, boolean, etc.) and
        returning True when the constraint is satisfied; may instead return
        False or raise Error.
      message: string, error message shown to the user on failure.
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def GetFlagsNames(self):
    """Return the single checked flag name, wrapped in a list."""
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    """Return a 'flag --name=value' description of the checked flag."""
    current_value = flag_values[self.flag_name].value
    return 'flag --%s=%s' % (self.flag_name, current_value)

  def _GetInputToCheckerFunction(self, flag_values):
    """Return the checked flag's value, as handed to the checker function."""
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Checks several flags together: the checker receives a dictionary mapping
  each flag name to its value and returns True when the combination is
  acceptable; otherwise it either returns False or raises an Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Constructor.

    Args:
      flag_names: [string], names of the flags used by checker.
      checker: function taking a dict (flag name -> flag value) and returning
        True when the constraint is satisfied; may instead return False or
        raise Error.
      message: string, error message shown to the user on failure.
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Return a dict mapping each name in self.flag_names to its value."""
    return dict((name, flag_values[name].value) for name in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    """Return a 'flags a=1, b=2' description of the checked flags."""
    described = ['%s=%s' % (name, flag_values[name].value)
                 for name in self.flag_names]
    return 'flags ' + ', '.join(described)

  def GetFlagsNames(self):
    """Return [string], the names of the checked flags."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
class Credentials(db.Model):
  """Datastore model holding one user's OAuth 2.0 credentials."""
  # Stored credentials; rows are looked up by user_id via StorageByKeyName.
  credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
  """Front page: starts the OAuth 2.0 flow or shows the user's Buzz feed."""

  @login_required
  def get(self):
    user = users.get_current_user()
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
      # No usable credentials yet: start the OAuth 2.0 web-server flow and
      # redirect the user to Google's authorization page.
      flow = OAuth2WebServerFlow(
          # Visit https://code.google.com/apis/console to
          # generate your client_id, client_secret and to
          # register your redirect_uri.
          client_id='<YOUR CLIENT ID HERE>',
          client_secret='<YOUR CLIENT SECRET HERE>',
          scope='https://www.googleapis.com/auth/buzz',
          user_agent='buzz-cmdline-sample/1.0',
          domain='anonymous',
          xoauth_displayname='Google App Engine Example App')
      callback = self.request.relative_url('/auth_return')
      authorize_url = flow.step1_get_authorize_url(callback)
      # Stash the flow in memcache so OAuthHandler can finish the exchange
      # when the authorization server redirects back.
      memcache.set(user.user_id(), pickle.dumps(flow))
      self.redirect(authorize_url)
    else:
      # Already authorized: fetch the user's consumption feed and render it.
      http = httplib2.Http()
      http = credentials.authorize(http)
      service = build("buzz", "v1", http=http)
      activities = service.activities()
      activitylist = activities.list(scope='@consumption',
                                     userId='@me').execute()
      path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      logout = users.create_logout_url('/')
      self.response.out.write(
          template.render(
              path, {'activitylist': activitylist,
                     'logout': logout
                     }))
class OAuthHandler(webapp.RequestHandler):
  """Handles the redirect back from Google's OAuth 2.0 authorization server."""

  @login_required
  def get(self):
    """Exchange the authorization response for credentials and store them.

    Recovers the flow object pickled by MainHandler from memcache; if the
    entry has been evicted or is missing, the callback is ignored.
    """
    user = users.get_current_user()
    pickled_flow = memcache.get(user.user_id())
    # BUG FIX: the original called pickle.loads() on the memcache result
    # unconditionally, so an evicted/missing entry raised TypeError before
    # the guard could run.  Check for the entry before unpickling.
    if pickled_flow:
      flow = pickle.loads(pickled_flow)
      credentials = flow.step2_exchange(self.request.params)
      StorageByKeyName(
          Credentials, user.user_id(), 'credentials').put(credentials)
      self.redirect("/")
def main():
  """Start the WSGI application; '/' shows data, '/auth_return' ends OAuth."""
  application = webapp.WSGIApplication(
      [
        ('/', MainHandler),
        ('/auth_return', OAuthHandler)
      ],
      debug=True)
  util.run_wsgi_app(application)
if __name__ == '__main__':
  main()
| [
[
14,
0,
0.1651,
0.0092,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1927,
0.0092,
0,
0.66,
0.05,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2018,
0.0092,
0,
0.6... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from oauth2client.appengine import CredentialsProperty",
"from oauth2client.appengine import StorageByKeyName",
"from oauth2client.clien... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
  """Raised when a validator constraint is not satisfied."""
class Validator(object):
  """Base class for flags validators.

  Users should NOT overload these classes, and use flags.Register...
  methods instead.
  """

  # Monotonically increasing counter used to hand out insertion indices.
  validators_count = 0

  def __init__(self, checker, message):
    """Constructor to create all validators.

    Args:
      checker: function to verify the constraint.
        Input of this method varies, see SimpleValidator and
        DictionaryValidator for a detailed description.
      message: string, error message to be shown to the user.
    """
    self.checker = checker
    self.message = message
    # Record registration order, so validators can be asserted in the order
    # they were registered (CL/18694236).
    Validator.validators_count += 1
    self.insertion_index = Validator.validators_count

  def Verify(self, flag_values):
    """Check that this validator's constraint is satisfied.

    Args:
      flag_values: flags.FlagValues, containing all flags.

    Raises:
      Error: if the constraint is not satisfied.
    """
    checker_input = self._GetInputToCheckerFunction(flag_values)
    if self.checker(checker_input):
      return
    raise Error(self.message)

  def GetFlagsNames(self):
    """Return [string], names of the flags checked by this validator."""
    raise NotImplementedError('This method should be overloaded')

  def PrintFlagsWithValues(self, flag_values):
    """Return a human-readable description of the checked flags and values."""
    raise NotImplementedError('This method should be overloaded')

  def _GetInputToCheckerFunction(self, flag_values):
    """Build the checker input from flag_values; type is subclass-specific."""
    raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
  """Validator behind RegisterValidator() method.

  Checks a single flag: the checker receives that flag's value and returns
  True when the value is acceptable; otherwise it either returns False or
  raises an Exception.
  """

  def __init__(self, flag_name, checker, message):
    """Constructor.

    Args:
      flag_name: string, name of the flag.
      checker: function taking the flag value (string, boolean, etc.) and
        returning True when the constraint is satisfied; may instead return
        False or raise Error.
      message: string, error message shown to the user on failure.
    """
    super(SimpleValidator, self).__init__(checker, message)
    self.flag_name = flag_name

  def GetFlagsNames(self):
    """Return the single checked flag name, wrapped in a list."""
    return [self.flag_name]

  def PrintFlagsWithValues(self, flag_values):
    """Return a 'flag --name=value' description of the checked flag."""
    current_value = flag_values[self.flag_name].value
    return 'flag --%s=%s' % (self.flag_name, current_value)

  def _GetInputToCheckerFunction(self, flag_values):
    """Return the checked flag's value, as handed to the checker function."""
    return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
  """Validator behind RegisterDictionaryValidator method.

  Checks several flags together: the checker receives a dictionary mapping
  each flag name to its value and returns True when the combination is
  acceptable; otherwise it either returns False or raises an Exception.
  """

  def __init__(self, flag_names, checker, message):
    """Constructor.

    Args:
      flag_names: [string], names of the flags used by checker.
      checker: function taking a dict (flag name -> flag value) and returning
        True when the constraint is satisfied; may instead return False or
        raise Error.
      message: string, error message shown to the user on failure.
    """
    super(DictionaryValidator, self).__init__(checker, message)
    self.flag_names = flag_names

  def _GetInputToCheckerFunction(self, flag_values):
    """Return a dict mapping each name in self.flag_names to its value."""
    return dict((name, flag_values[name].value) for name in self.flag_names)

  def PrintFlagsWithValues(self, flag_values):
    """Return a 'flags a=1, b=2' description of the checked flags."""
    described = ['%s=%s' % (name, flag_values[name].value)
                 for name in self.flag_names]
    return 'flags ' + ', '.join(described)

  def GetFlagsNames(self):
    """Return [string], the names of the checked flags."""
    return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
class Credentials(db.Model):
credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
credentials = StorageByKeyName(
Credentials, user.user_id(), 'credentials').get()
if credentials is None or credentials.invalid == True:
flow = OAuth2WebServerFlow(
# Visit https://code.google.com/apis/console to
# generate your client_id, client_secret and to
# register your redirect_uri.
client_id='<YOUR CLIENT ID HERE>',
client_secret='<YOUR CLIENT SECRET HERE>',
scope='https://www.googleapis.com/auth/buzz',
user_agent='buzz-cmdline-sample/1.0',
domain='anonymous',
xoauth_displayname='Google App Engine Example App')
callback = self.request.relative_url('/auth_return')
authorize_url = flow.step1_get_authorize_url(callback)
memcache.set(user.user_id(), pickle.dumps(flow))
self.redirect(authorize_url)
else:
http = httplib2.Http()
http = credentials.authorize(http)
service = build("buzz", "v1", http=http)
activities = service.activities()
activitylist = activities.list(scope='@consumption',
userId='@me').execute()
path = os.path.join(os.path.dirname(__file__), 'welcome.html')
logout = users.create_logout_url('/')
self.response.out.write(
template.render(
path, {'activitylist': activitylist,
'logout': logout
}))
class OAuthHandler(webapp.RequestHandler):
@login_required
def get(self):
user = users.get_current_user()
flow = pickle.loads(memcache.get(user.user_id()))
if flow:
credentials = flow.step2_exchange(self.request.params)
StorageByKeyName(
Credentials, user.user_id(), 'credentials').put(credentials)
self.redirect("/")
else:
pass
def main():
application = webapp.WSGIApplication(
[
('/', MainHandler),
('/auth_return', OAuthHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| [
[
14,
0,
0.1651,
0.0092,
0,
0.66,
0,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.1927,
0.0092,
0,
0.66,
0.05,
273,
0,
1,
0,
0,
273,
0,
0
],
[
1,
0,
0.2018,
0.0092,
0,
0.6... | [
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"import httplib2",
"import logging",
"import os",
"import pickle",
"from apiclient.discovery import build",
"from oauth2client.appengine import CredentialsProperty",
"from oauth2client.appengine import StorageByKeyName",
"from oauth2client.clien... |
#!/usr/bin/env python
# Copyright (c) 2010, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module to enforce different constraints on flags.
A validator represents an invariant, enforced over a one or more flags.
See 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.
"""
__author__ = 'olexiy@google.com (Olexiy Oryeshko)'
class Error(Exception):
"""Thrown If validator constraint is not satisfied."""
class Validator(object):
"""Base class for flags validators.
Users should NOT overload these classes, and use flags.Register...
methods instead.
"""
# Used to assign each validator an unique insertion_index
validators_count = 0
def __init__(self, checker, message):
"""Constructor to create all validators.
Args:
checker: function to verify the constraint.
Input of this method varies, see SimpleValidator and
DictionaryValidator for a detailed description.
message: string, error message to be shown to the user
"""
self.checker = checker
self.message = message
Validator.validators_count += 1
# Used to assert validators in the order they were registered (CL/18694236)
self.insertion_index = Validator.validators_count
def Verify(self, flag_values):
"""Verify that constraint is satisfied.
flags library calls this method to verify Validator's constraint.
Args:
flag_values: flags.FlagValues, containing all flags
Raises:
Error: if constraint is not satisfied.
"""
param = self._GetInputToCheckerFunction(flag_values)
if not self.checker(param):
raise Error(self.message)
def GetFlagsNames(self):
"""Return the names of the flags checked by this validator.
Returns:
[string], names of the flags
"""
raise NotImplementedError('This method should be overloaded')
def PrintFlagsWithValues(self, flag_values):
raise NotImplementedError('This method should be overloaded')
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues, containing all flags.
Returns:
Return type depends on the specific validator.
"""
raise NotImplementedError('This method should be overloaded')
class SimpleValidator(Validator):
"""Validator behind RegisterValidator() method.
Validates that a single flag passes its checker function. The checker function
takes the flag value and returns True (if value looks fine) or, if flag value
is not valid, either returns False or raises an Exception."""
def __init__(self, flag_name, checker, message):
"""Constructor.
Args:
flag_name: string, name of the flag.
checker: function to verify the validator.
input - value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(SimpleValidator, self).__init__(checker, message)
self.flag_name = flag_name
def GetFlagsNames(self):
return [self.flag_name]
def PrintFlagsWithValues(self, flag_values):
return 'flag --%s=%s' % (self.flag_name, flag_values[self.flag_name].value)
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues
Returns:
value of the corresponding flag.
"""
return flag_values[self.flag_name].value
class DictionaryValidator(Validator):
"""Validator behind RegisterDictionaryValidator method.
Validates that flag values pass their common checker function. The checker
function takes flag values and returns True (if values look fine) or,
if values are not valid, either returns False or raises an Exception.
"""
def __init__(self, flag_names, checker, message):
"""Constructor.
Args:
flag_names: [string], containing names of the flags used by checker.
checker: function to verify the validator.
input - dictionary, with keys() being flag_names, and value for each
key being the value of the corresponding flag (string, boolean, etc).
output - Boolean. Must return True if validator constraint is satisfied.
If constraint is not satisfied, it should either return False or
raise Error.
message: string, error message to be shown to the user if validator's
condition is not satisfied
"""
super(DictionaryValidator, self).__init__(checker, message)
self.flag_names = flag_names
def _GetInputToCheckerFunction(self, flag_values):
"""Given flag values, construct the input to be given to checker.
Args:
flag_values: flags.FlagValues
Returns:
dictionary, with keys() being self.lag_names, and value for each key
being the value of the corresponding flag (string, boolean, etc).
"""
return dict([key, flag_values[key].value] for key in self.flag_names)
def PrintFlagsWithValues(self, flag_values):
prefix = 'flags '
flags_with_values = []
for key in self.flag_names:
flags_with_values.append('%s=%s' % (key, flag_values[key].value))
return prefix + ', '.join(flags_with_values)
def GetFlagsNames(self):
return self.flag_names
| [
[
8,
0,
0.1818,
0.0267,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.2032,
0.0053,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.2219,
0.0107,
0,
0.66,
... | [
"\"\"\"Module to enforce different constraints on flags.\n\nA validator represents an invariant, enforced over a one or more flags.\nSee 'FLAGS VALIDATORS' in flags.py's docstring for a usage manual.\n\"\"\"",
"__author__ = 'olexiy@google.com (Olexiy Oryeshko)'",
"class Error(Exception):\n \"\"\"Thrown If vali... |
#!/usr/bin/python2.4
#
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
"""Simple command-line example for Google Prediction API.
Command-line application that trains on some data. This
sample does the same thing as the Hello Prediction! example.
http://code.google.com/apis/predict/docs/hello_world.html
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import time
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run
# Uncomment to get low level HTTP logging
#httplib2.debuglevel = 4
# Name of Google Storage bucket/object that contains the training data
OBJECT_NAME = "apiclient-prediction-sample/prediction_models/languages"
def main():
storage = Storage('prediction.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
flow = OAuth2WebServerFlow(
# You MUST put in your client id and secret here for this sample to
# work. Visit https://code.google.com/apis/console to get your client
# credentials.
client_id='<Put Your Client ID Here>',
client_secret='<Put Your Client Secret Here>',
scope='https://www.googleapis.com/auth/prediction',
user_agent='prediction-cmdline-sample/1.0',
xoauth_displayname='Prediction Example App')
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("prediction", "v1.1", http=http)
# Start training on a data set
train = service.training()
start = train.insert(data=OBJECT_NAME, body={}).execute()
print 'Started training'
pprint.pprint(start)
# Wait for the training to complete
while 1:
status = train.get(data=OBJECT_NAME).execute()
pprint.pprint(status)
if 'accuracy' in status['modelinfo']:
break
print 'Waiting for training to complete.'
time.sleep(10)
print 'Training is complete'
# Now make a prediction using that training
body = {'input': {'mixture': ["mucho bueno"]}}
prediction = service.predict(body=body, data=OBJECT_NAME).execute()
print 'The prediction is:'
pprint.pprint(prediction)
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1266,
0.0886,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1899,
0.0127,
0,
0.66,
0.0909,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2152,
0.0127,
0,
0.66,... | [
"\"\"\"Simple command-line example for Google Prediction API.\n\nCommand-line application that trains on some data. This\nsample does the same thing as the Hello Prediction! example.\n\n http://code.google.com/apis/predict/docs/hello_world.html\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"... |
#!/usr/bin/python2.4
#
# -*- coding: utf-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
"""Simple command-line example for Google Prediction API.
Command-line application that trains on some data. This
sample does the same thing as the Hello Prediction! example.
http://code.google.com/apis/predict/docs/hello_world.html
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import time
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run
# Uncomment to get low level HTTP logging
#httplib2.debuglevel = 4
# Name of Google Storage bucket/object that contains the training data
OBJECT_NAME = "apiclient-prediction-sample/prediction_models/languages"
def main():
storage = Storage('prediction.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
flow = OAuth2WebServerFlow(
# You MUST put in your client id and secret here for this sample to
# work. Visit https://code.google.com/apis/console to get your client
# credentials.
client_id='<Put Your Client ID Here>',
client_secret='<Put Your Client Secret Here>',
scope='https://www.googleapis.com/auth/prediction',
user_agent='prediction-cmdline-sample/1.0',
xoauth_displayname='Prediction Example App')
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("prediction", "v1.1", http=http)
# Start training on a data set
train = service.training()
start = train.insert(data=OBJECT_NAME, body={}).execute()
print 'Started training'
pprint.pprint(start)
# Wait for the training to complete
while 1:
status = train.get(data=OBJECT_NAME).execute()
pprint.pprint(status)
if 'accuracy' in status['modelinfo']:
break
print 'Waiting for training to complete.'
time.sleep(10)
print 'Training is complete'
# Now make a prediction using that training
body = {'input': {'mixture': ["mucho bueno"]}}
prediction = service.predict(body=body, data=OBJECT_NAME).execute()
print 'The prediction is:'
pprint.pprint(prediction)
if __name__ == '__main__':
main()
| [
[
8,
0,
0.1266,
0.0886,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1899,
0.0127,
0,
0.66,
0.0909,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.2152,
0.0127,
0,
0.66,... | [
"\"\"\"Simple command-line example for Google Prediction API.\n\nCommand-line application that trains on some data. This\nsample does the same thing as the Hello Prediction! example.\n\n http://code.google.com/apis/predict/docs/hello_world.html\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"... |
import pickle
import base64
from django.contrib import admin
from django.contrib.auth.models import User
from django.db import models
from oauth2client.django_orm import FlowField
from oauth2client.django_orm import CredentialsField
# The Flow could also be stored in memcache since it is short lived.
class FlowModel(models.Model):
id = models.ForeignKey(User, primary_key=True)
flow = FlowField()
class CredentialsModel(models.Model):
id = models.ForeignKey(User, primary_key=True)
credential = CredentialsField()
class CredentialsAdmin(admin.ModelAdmin):
pass
class FlowAdmin(admin.ModelAdmin):
pass
admin.site.register(CredentialsModel, CredentialsAdmin)
admin.site.register(FlowModel, FlowAdmin)
| [
[
1,
0,
0.0303,
0.0303,
0,
0.66,
0,
848,
0,
1,
0,
0,
848,
0,
0
],
[
1,
0,
0.0606,
0.0303,
0,
0.66,
0.0833,
177,
0,
1,
0,
0,
177,
0,
0
],
[
1,
0,
0.1212,
0.0303,
0,
... | [
"import pickle",
"import base64",
"from django.contrib import admin",
"from django.contrib.auth.models import User",
"from django.db import models",
"from oauth2client.django_orm import FlowField",
"from oauth2client.django_orm import CredentialsField",
"class FlowModel(models.Model):\n id = models.F... |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.failUnlessEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| [
[
8,
0,
0.1458,
0.25,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3333,
0.0417,
0,
0.66,
0.3333,
944,
0,
1,
0,
0,
944,
0,
0
],
[
3,
0,
0.5833,
0.2917,
0,
0.66,
... | [
"\"\"\"\nThis file demonstrates two different styles of tests (one doctest and one\nunittest). These will both pass when you run \"manage.py test\".\n\nReplace these with more appropriate tests for your application.\n\"\"\"",
"from django.test import TestCase",
"class SimpleTest(TestCase):\n\n def test_basic... |
import os
import logging
import httplib2
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from oauth2client.django_orm import Storage
from oauth2client.client import OAuth2WebServerFlow
from django_sample.buzz.models import CredentialsModel
from django_sample.buzz.models import FlowModel
from apiclient.discovery import build
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
STEP2_URI = 'http://localhost:8000/auth_return'
@login_required
def index(request):
storage = Storage(CredentialsModel, 'id', request.user, 'credential')
credential = storage.get()
if credential is None or credential.invalid == True:
flow = OAuth2WebServerFlow(
client_id='887851474342.apps.googleusercontent.com',
client_secret='6V9MHBUQqOQtxI7uXPIEnV8e',
scope='https://www.googleapis.com/auth/buzz',
user_agent='buzz-django-sample/1.0',
)
authorize_url = flow.step1_get_authorize_url(STEP2_URI)
f = FlowModel(id=request.user, flow=flow)
f.save()
return HttpResponseRedirect(authorize_url)
else:
http = httplib2.Http()
http = credential.authorize(http)
service = build("buzz", "v1", http=http)
activities = service.activities()
activitylist = activities.list(scope='@consumption',
userId='@me').execute()
logging.info(activitylist)
return render_to_response('buzz/welcome.html', {
'activitylist': activitylist,
})
@login_required
def auth_return(request):
try:
f = FlowModel.objects.get(id=request.user)
credential = f.flow.step2_exchange(request.REQUEST)
storage = Storage(CredentialsModel, 'id', request.user, 'credential')
storage.put(credential)
f.delete()
return HttpResponseRedirect("/")
except FlowModel.DoesNotExist:
pass
| [
[
1,
0,
0.0164,
0.0164,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0328,
0.0164,
0,
0.66,
0.0667,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0492,
0.0164,
0,
... | [
"import os",
"import logging",
"import httplib2",
"from django.http import HttpResponse",
"from django.core.urlresolvers import reverse",
"from django.contrib.auth.decorators import login_required",
"from oauth2client.django_orm import Storage",
"from oauth2client.client import OAuth2WebServerFlow",
... |
#!/usr/bin/python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("""Error: Can't find the file 'settings.py' in the
directory containing %r. It appears you've customized things. You'll
have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError
somehow.)\n""" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1333,
0.0667,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.6667,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.2667,
0.0667,
1,
0.97,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"\"\"Error: Can't find the file 'settings.py' in the\ndirectory containing %r. It appears you've customized things. You'll\nhave to ru... |
#!/usr/bin/python
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("""Error: Can't find the file 'settings.py' in the
directory containing %r. It appears you've customized things. You'll
have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError
somehow.)\n""" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
[
1,
0,
0.1333,
0.0667,
0,
0.66,
0,
879,
0,
1,
0,
0,
879,
0,
0
],
[
7,
0,
0.5,
0.6667,
0,
0.66,
0.5,
0,
0,
1,
0,
0,
0,
0,
2
],
[
1,
1,
0.2667,
0.0667,
1,
0.04,
... | [
"from django.core.management import execute_manager",
"try:\n import settings # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"\"\"Error: Can't find the file 'settings.py' in the\ndirectory containing %r. It appears you've customized things. You'll\nhave to ru... |
import os
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Example:
(r'^$', 'django_sample.buzz.views.index'),
(r'^auth_return', 'django_sample.buzz.views.auth_return'),
# Uncomment the admin/doc line below and add 'django.contrib.admindocs'
# to INSTALLED_APPS to enable admin documentation:
# (r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
(r'^admin/', include(admin.site.urls)),
(r'^accounts/login/$', 'django.contrib.auth.views.login',
{'template_name': 'buzz/login.html'}),
(r'^static/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': os.path.join(os.path.dirname(__file__), 'static')
}),
)
| [
[
1,
0,
0.04,
0.04,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.08,
0.04,
0,
0.66,
0.25,
341,
0,
1,
0,
0,
341,
0,
0
],
[
1,
0,
0.2,
0.04,
0,
0.66,
0.5,
... | [
"import os",
"from django.conf.urls.defaults import *",
"from django.contrib import admin",
"admin.autodiscover()",
"urlpatterns = patterns('',\n # Example:\n (r'^$', 'django_sample.buzz.views.index'),\n (r'^auth_return', 'django_sample.buzz.views.auth_return'),\n\n # Uncomment the admin/doc lin... |
# Django settings for django_sample project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'database.sqlite3'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_=9hq-$t_uv1ckf&s!y2$9g$1dm*6p1cl%*!^mg=7gr)!zj32d'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
)
ROOT_URLCONF = 'django_sample.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates"
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'templates')
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django_sample.buzz'
)
| [
[
1,
0,
0.0238,
0.0119,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
14,
0,
0.0476,
0.0119,
0,
0.66,
0.0435,
309,
1,
0,
0,
0,
0,
4,
0
],
[
14,
0,
0.0595,
0.0119,
0,
... | [
"import os",
"DEBUG = True",
"TEMPLATE_DEBUG = DEBUG",
"ADMINS = (\n # ('Your Name', 'your_email@domain.com'),\n)",
"MANAGERS = ADMINS",
"DATABASE_ENGINE = 'sqlite3'",
"DATABASE_NAME = 'database.sqlite3'",
"DATABASE_USER = ''",
"DATABASE_PASSWORD = ''",
"DATABASE_HOST = ''",
"DATABASE_PORT = ... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Custom Search.
Command-line application that does a search.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import pprint
# Uncomment the next line to get very detailed logging
# httplib2.debuglevel = 4
def main():
service = build("customsearch", "v1",
developerKey="AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0")
res = service.cse().list(
q='lectures',
cx='017576662512468239146:omuauf_lfve',
).execute()
pprint.pprint(res)
if __name__ == '__main__':
main()
| [
[
8,
0,
0.2419,
0.129,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3548,
0.0323,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.4194,
0.0323,
0,
0.66,
... | [
"\"\"\"Simple command-line example for Custom Search.\n\nCommand-line application that does a search.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"from apiclient.discovery import build",
"import pprint",
"def main():\n service = build(\"customsearch\", \"v1\",\n developerKey... |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Custom Search.
Command-line application that does a search.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import pprint
# Uncomment the next line to get very detailed logging
# httplib2.debuglevel = 4
def main():
service = build("customsearch", "v1",
developerKey="AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0")
res = service.cse().list(
q='lectures',
cx='017576662512468239146:omuauf_lfve',
).execute()
pprint.pprint(res)
if __name__ == '__main__':
main()
| [
[
8,
0,
0.2419,
0.129,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3548,
0.0323,
0,
0.66,
0.2,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.4194,
0.0323,
0,
0.66,
... | [
"\"\"\"Simple command-line example for Custom Search.\n\nCommand-line application that does a search.\n\"\"\"",
"__author__ = 'jcgregorio@google.com (Joe Gregorio)'",
"from apiclient.discovery import build",
"import pprint",
"def main():\n service = build(\"customsearch\", \"v1\",\n developerKey... |
'''
Created on 29/03/2011
@author: Eran_Z
Searching
'''
from math import log
import random
import sys
import google_m
def __search(searchStr):
"""Searches given search string on the web.
Returns number of hits."""
temp = int(sys.maxint*random.random())
print searchStr + " : " + str(temp)
return temp
#return google_m.showsome(searchStr)
def searchSingle(term):
"""Returns number of hits for given term."""
try:
index = __history.index(term)
except ValueError:
#isn't in the list
hits = __search("\"" + term + "\"")
__history.append(term)
__history.append(hits)
return hits
else:
return __history[index + 1]
def searchTogether(term1, term2):
"""Returns number of hits for 2 given terms together."""
try:
index = __history.index((term1, term2))
except ValueError:
#isn't in the list
hits = __search("\"" + term1 + "\" \"" + term2 + "\"")
__history.append((term1, term2))
__history.append(hits)
__history.append((term2, term1))
__history.append(hits)
return hits
else:
return __history[index + 1]
def searchExclusion(term, Ex):
"""Returns number of hits for given term,
excluding pages containing terms from the given exclusion group."""
searchStr = "\"" + term + "\""
for str in Ex:
searchStr += " -\"" + str + "\""
return __search(searchStr)
def NGD(x,y):
logx = log(searchSingle(x))
logy = log(searchSingle(y))
logxy = log(searchTogether(x,y))
logN = log(google_m.N)
return (max(logx, logy) - logxy) / (logN - min(logx, logy))
# A flat search-history list acting as a cache, so repeated queries are
# not re-issued: entries alternate between a query key (a term, or a
# (term1, term2) tuple) and its hit count, i.e. a key's cached count is
# at index(key) + 1.  Currently only 'single' and 'together' searches
# are cached.
__history = []
| [
[
8,
0,
0.0588,
0.1029,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1324,
0.0147,
0,
0.66,
0.1,
526,
0,
1,
0,
0,
526,
0,
0
],
[
1,
0,
0.1471,
0.0147,
0,
0.66,
... | [
"'''\nCreated on 29/03/2011\n\n@author: Eran_Z\n\nSearching\n'''",
"from math import log",
"import random",
"import sys",
"import google_m",
"def __search(searchStr):\n \"\"\"Searches given search string on the web.\n Returns number of hits.\"\"\"\n temp = int(sys.maxint*random.random())\n pri... |
'''
Created on 27/03/2011
@author: Eran_Z
Feasibility study (main)
'''
import weights_m
import scores_m
import io_m
def generateWeights(algorithm, context, world):
    """Produce one weight per context item by delegating to *algorithm*.

    The result is a list of numbers (preferably positive)."""
    weighting = algorithm
    return weighting(context, world)
def calculateScores(algorithm, context, weights, world):
    """Produce one score per world item by delegating to *algorithm*,
    given the context and its weights.

    In the future, this will probably use many internet searches."""
    scoring = algorithm
    return scoring(context, weights, world)
def sortWorld(world, scores):
    """Sort *world* and *scores* together by score, largest first.

    Returns a (sorted_world, sorted_scores) tuple of two new lists;
    the sort is stable, so equal-score items keep their relative order.
    """
    if not world:
        # zip(*[]) would raise; preserve the ([], []) result explicitly.
        return ([], [])
    # Pair items with their scores instead of indexing by position.
    pairs = sorted(zip(world, scores), key=lambda p: p[1], reverse=True)
    sorted_world, sorted_scores = zip(*pairs)
    return (list(sorted_world), list(sorted_scores))
########################################################
########################################################
# MAIN function:
def COR_algorithm(weightingAlgorithm, scoringAlgorithm, context, world):
    """Run the full context-ordered-ranking pipeline.

    Looks up the named weighting and scoring algorithms in their
    registries, weights the context, scores the world, and returns the
    world sorted by score (best first)."""
    weighting = weights_m.weightingAlgorithms[weightingAlgorithm]
    scoring = scores_m.scoringAlgorithms[scoringAlgorithm]
    # Stage 1: one weight per context item.
    weights = generateWeights(weighting, context, world)
    # Stage 2: one score per world item.
    scores = calculateScores(scoring, context, weights, world)
    # Order the world by descending score; scores are discarded here.
    (world, scores) = sortWorld(world, scores)
    return world
########################################################
########################################################
# invocation of the algorithm:
#COR_algorithm(io_m.getWeightingAlg(), io_m.getScoringAlg(), io_m.getContext(), io_m.getWorld())
| [
[
8,
0,
0.0667,
0.1167,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.15,
0.0167,
0,
0.66,
0.1429,
760,
0,
1,
0,
0,
760,
0,
0
],
[
1,
0,
0.1667,
0.0167,
0,
0.66,
... | [
"'''\nCreated on 27/03/2011\n\n@author: Eran_Z\n\nFeasibility study (main)\n'''",
"import weights_m",
"import scores_m",
"import io_m",
"def generateWeights(algorithm, context, world):\n \"\"\"Generates a list of weights for the context items.\n This is a list of numbers (preferrably positive)\"\"\"\n... |
#!/usr/bin/env python
"""
generatepot.py script.
This script generates the imdbpy.pot file, from the DTD.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import sys
from datetime import datetime as dt
# Messages with hand-written default translations; empty for now, so
# defaults are derived from the element names below.
DEFAULT_MESSAGES = { }
# Matches '<!ELEMENT name' declarations in the DTD, capturing the name.
ELEMENT_PATTERN = r"""<!ELEMENT\s+([^\s]+)"""
re_element = re.compile(ELEMENT_PATTERN)
# Header of the generated .pot file; %(now)s is filled in below.
POT_HEADER_TEMPLATE = r"""# Gettext message file for imdbpy
msgid ""
msgstr ""
"Project-Id-Version: imdbpy\n"
"POT-Creation-Date: %(now)s\n"
"PO-Revision-Date: YYYY-MM-DD HH:MM+0000\n"
"Last-Translator: YOUR NAME <YOUR@EMAIL>\n"
"Language-Team: TEAM NAME <TEAM@EMAIL>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Language-Code: en\n"
"Language-Name: English\n"
"Preferred-Encodings: utf-8\n"
"Domain: imdbpy\n"
"""
# The DTD file to scan is the single command-line argument.
if len(sys.argv) != 2:
    print "Usage: %s dtd_file" % sys.argv[0]
    sys.exit()
dtdfilename = sys.argv[1]
dtd = open(dtdfilename).read()
# Collect the unique element names declared in the DTD.
elements = re_element.findall(dtd)
uniq = set(elements)
elements = list(uniq)
# Emit the .pot file on stdout: header first, then one entry per element.
print POT_HEADER_TEMPLATE % {
    'now': dt.strftime(dt.now(), "%Y-%m-%d %H:%M+0000")
}
for element in sorted(elements):
    if element in DEFAULT_MESSAGES:
        print '# Default: %s' % DEFAULT_MESSAGES[element]
    else:
        # Default message: element name, dashes as spaces, capitalized.
        print '# Default: %s' % element.replace('-', ' ').capitalize()
    print 'msgid "%s"' % element
    print 'msgstr ""'
    # use this part instead of the line above to generate the po file for English
    #if element in DEFAULT_MESSAGES:
    #    print 'msgstr "%s"' % DEFAULT_MESSAGES[element]
    #else:
    #    print 'msgstr "%s"' % element.replace('-', ' ').capitalize()
print
| [
[
1,
0,
0.2,
0.2,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.4,
0.2,
0,
0.66,
0.5,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.8,
0.2,
0,
0.66,
1,
426,... | [
"import re",
"import sys",
"from datetime import datetime as dt"
] |
#!/usr/bin/env python
"""
generatepot.py script.
This script generates the imdbpy.pot file, from the DTD.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import sys
from datetime import datetime as dt
# Messages with hand-written default translations; empty for now, so
# defaults are derived from the element names below.
DEFAULT_MESSAGES = { }
# Matches '<!ELEMENT name' declarations in the DTD, capturing the name.
ELEMENT_PATTERN = r"""<!ELEMENT\s+([^\s]+)"""
re_element = re.compile(ELEMENT_PATTERN)
# Header of the generated .pot file; %(now)s is filled in below.
POT_HEADER_TEMPLATE = r"""# Gettext message file for imdbpy
msgid ""
msgstr ""
"Project-Id-Version: imdbpy\n"
"POT-Creation-Date: %(now)s\n"
"PO-Revision-Date: YYYY-MM-DD HH:MM+0000\n"
"Last-Translator: YOUR NAME <YOUR@EMAIL>\n"
"Language-Team: TEAM NAME <TEAM@EMAIL>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"Plural-Forms: nplurals=1; plural=0;\n"
"Language-Code: en\n"
"Language-Name: English\n"
"Preferred-Encodings: utf-8\n"
"Domain: imdbpy\n"
"""
# The DTD file to scan is the single command-line argument.
if len(sys.argv) != 2:
    print "Usage: %s dtd_file" % sys.argv[0]
    sys.exit()
dtdfilename = sys.argv[1]
dtd = open(dtdfilename).read()
# Collect the unique element names declared in the DTD.
elements = re_element.findall(dtd)
uniq = set(elements)
elements = list(uniq)
# Emit the .pot file on stdout: header first, then one entry per element.
print POT_HEADER_TEMPLATE % {
    'now': dt.strftime(dt.now(), "%Y-%m-%d %H:%M+0000")
}
for element in sorted(elements):
    if element in DEFAULT_MESSAGES:
        print '# Default: %s' % DEFAULT_MESSAGES[element]
    else:
        # Default message: element name, dashes as spaces, capitalized.
        print '# Default: %s' % element.replace('-', ' ').capitalize()
    print 'msgid "%s"' % element
    print 'msgstr ""'
    # use this part instead of the line above to generate the po file for English
    #if element in DEFAULT_MESSAGES:
    #    print 'msgstr "%s"' % DEFAULT_MESSAGES[element]
    #else:
    #    print 'msgstr "%s"' % element.replace('-', ' ').capitalize()
print
| [
[
1,
0,
0.2,
0.2,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.4,
0.2,
0,
0.66,
0.5,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.8,
0.2,
0,
0.66,
1,
426,... | [
"import re",
"import sys",
"from datetime import datetime as dt"
] |
#!/usr/bin/env python
"""
rebuildmo.py script.
This script builds the .mo files, from the .po files.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import glob
import msgfmt
import os
#LOCALE_DIR = os.path.dirname(__file__)
def rebuildmo():
    """Compile every imdbpy-*.po catalog into <lang>/LC_MESSAGES/imdbpy.mo.

    Creates the per-language directories as needed and returns the list
    of language codes that were (re)built."""
    created = []
    for po_file in glob.glob('imdbpy-*.po'):
        # 'imdbpy-XX.po' -> 'XX' (strip the 7-char prefix and '.po').
        lang = po_file[7:-3]
        mo_dir = os.path.join(lang, 'LC_MESSAGES')
        for directory in (lang, mo_dir):
            if not os.path.exists(directory):
                os.mkdir(directory)
        msgfmt.make(po_file, os.path.join(mo_dir, 'imdbpy.mo'))
        created.append(lang)
    return created
# When run as a script, rebuild every catalog and report what was built.
if __name__ == '__main__':
    languages = rebuildmo()
    print 'Created locale for: %s.' % ' '.join(languages)
| [
[
8,
0,
0.2449,
0.4286,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4898,
0.0204,
0,
0.66,
0.2,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.5102,
0.0204,
0,
0.66,
... | [
"\"\"\"\nrebuildmo.py script.\n\nThis script builds the .mo files, from the .po files.\n\nCopyright 2009 H. Turgut Uyar <uyar@tekir.org>\n\nThis program is free software; you can redistribute it and/or modify",
"import glob",
"import msgfmt",
"import os",
"def rebuildmo():\n lang_glob = 'imdbpy-*.po'\n ... |
#!/usr/bin/env python
"""
rebuildmo.py script.
This script builds the .mo files, from the .po files.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import glob
import msgfmt
import os
#LOCALE_DIR = os.path.dirname(__file__)
def rebuildmo():
    """Compile every imdbpy-*.po catalog into <lang>/LC_MESSAGES/imdbpy.mo.

    Creates the per-language directories as needed and returns the list
    of language codes that were (re)built."""
    created = []
    for po_file in glob.glob('imdbpy-*.po'):
        # 'imdbpy-XX.po' -> 'XX' (strip the 7-char prefix and '.po').
        lang = po_file[7:-3]
        mo_dir = os.path.join(lang, 'LC_MESSAGES')
        for directory in (lang, mo_dir):
            if not os.path.exists(directory):
                os.mkdir(directory)
        msgfmt.make(po_file, os.path.join(mo_dir, 'imdbpy.mo'))
        created.append(lang)
    return created
# When run as a script, rebuild every catalog and report what was built.
if __name__ == '__main__':
    languages = rebuildmo()
    print 'Created locale for: %s.' % ' '.join(languages)
| [
[
8,
0,
0.2449,
0.4286,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4898,
0.0204,
0,
0.66,
0.2,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.5102,
0.0204,
0,
0.66,
... | [
"\"\"\"\nrebuildmo.py script.\n\nThis script builds the .mo files, from the .po files.\n\nCopyright 2009 H. Turgut Uyar <uyar@tekir.org>\n\nThis program is free software; you can redistribute it and/or modify",
"import glob",
"import msgfmt",
"import os",
"def rebuildmo():\n lang_glob = 'imdbpy-*.po'\n ... |
"""
locale package (imdb package).
This package provides scripts and files for internationalization
of IMDbPY.
Copyright 2009 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import gettext
import os
# Directory holding the compiled catalogs (<lang>/LC_MESSAGES/imdbpy.mo).
LOCALE_DIR = os.path.dirname(__file__)
# Bind the 'imdbpy' gettext domain to that directory at import time.
gettext.bindtextdomain('imdbpy', LOCALE_DIR)
| [
[
8,
0,
0.3966,
0.7586,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.8276,
0.0345,
0,
0.66,
0.25,
723,
0,
1,
0,
0,
723,
0,
0
],
[
1,
0,
0.8621,
0.0345,
0,
0.66,
... | [
"\"\"\"\nlocale package (imdb package).\n\nThis package provides scripts and files for internationalization\nof IMDbPY.\n\nCopyright 2009 H. Turgut Uyar <uyar@tekir.org>",
"import gettext",
"import os",
"LOCALE_DIR = os.path.dirname(__file__)",
"gettext.bindtextdomain('imdbpy', LOCALE_DIR)"
] |
"""
Movie module (imdb package).
This module provides the Movie class, used to store information about
a given movie.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb import articles
from imdb.utils import analyze_title, build_title, canonicalTitle, \
flatten, _Container, cmpMovies
class Movie(_Container):
    """A Movie.
    Every information about a movie can be accessed as:
        movieObject['information']
    to get a list of the kind of information stored in a
    Movie object, use the keys() method; some useful aliases
    are defined (as "casting" for the "casting director" key); see
    the keys_alias dictionary.
    """
    # The default sets of information retrieved.
    default_info = ('main', 'plot')
    # Aliases for some not-so-intuitive keys: alternative spellings and
    # IMDb section names are mapped onto the canonical data keys.
    keys_alias = {
                'tv schedule': 'airing',
                'user rating': 'rating',
                'plot summary': 'plot',
                'plot summaries': 'plot',
                'directed by': 'director',
                'created by': 'creator',
                'writing credits': 'writer',
                'produced by': 'producer',
                'original music by': 'original music',
                'non-original music by': 'non-original music',
                'music': 'original music',
                'cinematography by': 'cinematographer',
                'cinematography': 'cinematographer',
                'film editing by': 'editor',
                'film editing': 'editor',
                'editing': 'editor',
                'actors': 'cast',
                'actresses': 'cast',
                'casting by': 'casting director',
                'casting': 'casting director',
                'art direction by': 'art direction',
                'set decoration by': 'set decoration',
                'costume design by': 'costume designer',
                'costume design': 'costume designer',
                'makeup department': 'make up',
                'makeup': 'make up',
                'make-up': 'make up',
                'production management': 'production manager',
                'production company': 'production companies',
                'second unit director or assistant director':
                    'assistant director',
                'second unit director': 'assistant director',
                'sound department': 'sound crew',
                'costume and wardrobe department': 'costume department',
                'special effects by': 'special effects',
                'visual effects by': 'visual effects',
                'special effects company': 'special effects companies',
                'stunts': 'stunt performer',
                'other crew': 'miscellaneous crew',
                'misc crew': 'miscellaneous crew',
                'miscellaneouscrew': 'miscellaneous crew',
                'crewmembers': 'miscellaneous crew',
                'crew members': 'miscellaneous crew',
                'other companies': 'miscellaneous companies',
                'misc companies': 'miscellaneous companies',
                'miscellaneous company': 'miscellaneous companies',
                'misc company': 'miscellaneous companies',
                'other company': 'miscellaneous companies',
                'aka': 'akas',
                'also known as': 'akas',
                'country': 'countries',
                'production country': 'countries',
                'production countries': 'countries',
                'genre': 'genres',
                'runtime': 'runtimes',
                'lang': 'languages',
                'color': 'color info',
                'cover': 'cover url',
                'full-size cover': 'full-size cover url',
                'seasons': 'number of seasons',
                'language': 'languages',
                'certificate': 'certificates',
                'certifications': 'certificates',
                'certification': 'certificates',
                'miscellaneous links': 'misc links',
                'miscellaneous': 'misc links',
                'soundclips': 'sound clips',
                'videoclips': 'video clips',
                'photographs': 'photo sites',
                'distributor': 'distributors',
                'distribution': 'distributors',
                'distribution companies': 'distributors',
                'distribution company': 'distributors',
                'guest': 'guests',
                'guest appearances': 'guests',
                'tv guests': 'guests',
                'notable tv guest appearances': 'guests',
                'episodes cast': 'guests',
                'episodes number': 'number of episodes',
                'amazon review': 'amazon reviews',
                'merchandising': 'merchandising links',
                'merchandise': 'merchandising links',
                'sales': 'merchandising links',
                'faq': 'faqs',
                'parental guide': 'parents guide',
                'frequently asked questions': 'faqs'}
    # Text-valued keys whose content goes through the modFunct
    # (reference-substitution) machinery of _Container before return.
    keys_tomodify_list = ('plot', 'trivia', 'alternate versions', 'goofs',
                        'quotes', 'dvd', 'laserdisc', 'news', 'soundtrack',
                        'crazy credits', 'business', 'supplements',
                        'video review', 'faqs')
    # Movies are compared/sorted with the cmpMovies utility function.
    cmpFunct = cmpMovies
    def _init(self, **kwds):
        """Initialize a Movie object.
        *movieID* -- the unique identifier for the movie.
        *title* -- the title of the Movie, if not in the data dictionary.
        *myTitle* -- your personal title for the movie.
        *myID* -- your personal identifier for the movie.
        *data* -- a dictionary used to initialize the object.
        *currentRole* -- a Character instance representing the current role
                         or duty of a person in this movie, or a Person
                         object representing the actor/actress who played
                         a given character in a Movie.  If a string is
                         passed, an object is automatically build.
        *roleID* -- if available, the characterID/personID of the currentRole
                    object.
        *roleIsPerson* -- when False (default) the currentRole is assumed
                          to be a Character object, otherwise a Person.
        *notes* -- notes for the person referred in the currentRole
                   attribute; e.g.: '(voice)'.
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function called returning text fields.
        """
        # Only set the title when the data dict doesn't already carry one.
        title = kwds.get('title')
        if title and not self.data.has_key('title'):
            self.set_title(title)
        self.movieID = kwds.get('movieID', None)
        self.myTitle = kwds.get('myTitle', u'')
    def _reset(self):
        """Reset the Movie object."""
        self.movieID = None
        self.myTitle = u''
    def set_title(self, title):
        """Set the title of the movie, parsing it into its components
        (title, year, kind, ...) via analyze_title."""
        # XXX: convert title to unicode, if it's a plain string?
        d_title = analyze_title(title)
        self.data.update(d_title)
    def _additional_keys(self):
        """Valid keys to append to the data.keys() list: derived keys
        that _getitem() can compute from the stored data."""
        addkeys = []
        if self.data.has_key('title'):
            addkeys += ['canonical title', 'long imdb title',
                        'long imdb canonical title',
                        'smart canonical title',
                        'smart long imdb canonical title']
        if self.data.has_key('episode of'):
            addkeys += ['long imdb episode title', 'series title',
                        'canonical series title', 'episode title',
                        'canonical episode title',
                        'smart canonical series title',
                        'smart canonical episode title']
        if self.data.has_key('cover url'):
            addkeys += ['full-size cover url']
        return addkeys
    def guessLanguage(self):
        """Guess the language of the title of this movie; returns None
        if there are no hints."""
        # Prefer the first listed language; fall back to a language
        # derived from the first production country.
        lang = self.get('languages')
        if lang:
            lang = lang[0]
        else:
            country = self.get('countries')
            if country:
                lang = articles.COUNTRY_LANG.get(country[0])
        return lang
    def smartCanonicalTitle(self, title=None, lang=None):
        """Return the canonical title, guessing its language.
        The title can be forced with the 'title' argument (internally
        used) and the language can be forced with the 'lang' argument,
        otherwise it's auto-detected."""
        if title is None:
            title = self.data.get('title', u'')
        if lang is None:
            lang = self.guessLanguage()
        return canonicalTitle(title, lang=lang)
    def _getitem(self, key):
        """Handle special (derived) keys."""
        # Episode-only keys, computed from the 'episode of' series data.
        if self.data.has_key('episode of'):
            if key == 'long imdb episode title':
                return build_title(self.data)
            elif key == 'series title':
                return self.data['episode of']['title']
            elif key == 'canonical series title':
                ser_title = self.data['episode of']['title']
                return canonicalTitle(ser_title)
            elif key == 'smart canonical series title':
                ser_title = self.data['episode of']['title']
                return self.smartCanonicalTitle(ser_title)
            elif key == 'episode title':
                return self.data.get('title', u'')
            elif key == 'canonical episode title':
                return canonicalTitle(self.data.get('title', u''))
            elif key == 'smart canonical episode title':
                return self.smartCanonicalTitle(self.data.get('title', u''))
        # Keys derived from the plain title.
        if self.data.has_key('title'):
            if key == 'title':
                return self.data['title']
            elif key == 'long imdb title':
                return build_title(self.data)
            elif key == 'canonical title':
                return canonicalTitle(self.data['title'])
            elif key == 'smart canonical title':
                return self.smartCanonicalTitle(self.data['title'])
            elif key == 'long imdb canonical title':
                return build_title(self.data, canonical=1)
            elif key == 'smart long imdb canonical title':
                return build_title(self.data, canonical=1,
                                    lang=self.guessLanguage())
        if key == 'full-size cover url' and self.data.has_key('cover url'):
            return self._re_fullsizeURL.sub('', self.data.get('cover url', ''))
        return None
    def getID(self):
        """Return the movieID."""
        return self.movieID
    def __nonzero__(self):
        """The Movie is "false" if the self.data does not contain a title."""
        # XXX: check the title and the movieID?
        if self.data.has_key('title'): return 1
        return 0
    def isSameTitle(self, other):
        """Return true if this and the compared object have the same
        long imdb title and/or movieID.
        """
        # XXX: obsolete?
        if not isinstance(other, self.__class__): return 0
        if self.data.has_key('title') and \
                other.data.has_key('title') and \
                build_title(self.data, canonical=0) == \
                build_title(other.data, canonical=0):
            return 1
        if self.accessSystem == other.accessSystem and \
                self.movieID is not None and self.movieID == other.movieID:
            return 1
        return 0
    isSameMovie = isSameTitle # XXX: just for backward compatiblity.
    def __contains__(self, item):
        """Return true if the given Person object is listed in this Movie,
        or if the given Character is represented in this Movie."""
        # Imported here to avoid circular imports between the modules.
        from Person import Person
        from Character import Character
        from Company import Company
        if isinstance(item, Person):
            for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p):
                    return 1
        elif isinstance(item, Character):
            # Characters are matched against the currentRole of the
            # Person objects found in the data.
            for p in flatten(self.data, yieldDictKeys=1, scalar=Person,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(p.currentRole):
                    return 1
        elif isinstance(item, Company):
            for c in flatten(self.data, yieldDictKeys=1, scalar=Company,
                            toDescend=(list, dict, tuple, Movie)):
                if item.isSame(c):
                    return 1
        return 0
    def __deepcopy__(self, memo):
        """Return a deep copy of a Movie instance."""
        m = Movie(title=u'', movieID=self.movieID, myTitle=self.myTitle,
                    myID=self.myID, data=deepcopy(self.data, memo),
                    currentRole=deepcopy(self.currentRole, memo),
                    roleIsPerson=self._roleIsPerson,
                    notes=self.notes, accessSystem=self.accessSystem,
                    titlesRefs=deepcopy(self.titlesRefs, memo),
                    namesRefs=deepcopy(self.namesRefs, memo),
                    charactersRefs=deepcopy(self.charactersRefs, memo))
        m.current_info = list(self.current_info)
        m.set_mod_funct(self.modFunct)
        return m
    def __repr__(self):
        """String representation of a Movie object."""
        # XXX: add also currentRole and notes, if present?
        if self.has_key('long imdb episode title'):
            title = self.get('long imdb episode title')
        else:
            title = self.get('long imdb title')
        r = '<Movie id:%s[%s] title:_%s_>' % (self.movieID, self.accessSystem,
                                                title)
        if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
        return r
    def __str__(self):
        """Simply print the short title."""
        return self.get('title', u'').encode('utf_8', 'replace')
    def __unicode__(self):
        """Simply print the short title."""
        return self.get('title', u'')
    def summary(self):
        """Return a string with a pretty-printed summary for the movie."""
        if not self: return u''
        def _nameAndRole(personList, joiner=u', '):
            """Build a pretty string with name and role."""
            nl = []
            for person in personList:
                n = person.get('name', u'')
                if person.currentRole: n += u' (%s)' % person.currentRole
                nl.append(n)
            return joiner.join(nl)
        s = u'Movie\n=====\nTitle: %s\n' % \
                self.get('long imdb canonical title', u'')
        genres = self.get('genres')
        if genres: s += u'Genres: %s.\n' % u', '.join(genres)
        director = self.get('director')
        if director:
            s += u'Director: %s.\n' % _nameAndRole(director)
        writer = self.get('writer')
        if writer:
            s += u'Writer: %s.\n' % _nameAndRole(writer)
        cast = self.get('cast')
        if cast:
            # Only the first five cast members are shown.
            cast = cast[:5]
            s += u'Cast: %s.\n' % _nameAndRole(cast)
        runtime = self.get('runtimes')
        if runtime:
            s += u'Runtime: %s.\n' % u', '.join(runtime)
        countries = self.get('countries')
        if countries:
            s += u'Country: %s.\n' % u', '.join(countries)
        lang = self.get('languages')
        if lang:
            s += u'Language: %s.\n' % u', '.join(lang)
        rating = self.get('rating')
        if rating:
            s += u'Rating: %s' % rating
            nr_votes = self.get('votes')
            if nr_votes:
                s += u' (%s votes)' % nr_votes
            s += u'.\n'
        # Show only the first plot summary, truncated before the
        # '::author' marker, if any.
        plot = self.get('plot')
        if not plot:
            plot = self.get('plot summary')
            if plot:
                plot = [plot]
        if plot:
            plot = plot[0]
            i = plot.find('::')
            if i != -1:
                plot = plot[:i]
            s += u'Plot: %s' % plot
        return s
| [
[
8,
0,
0.0289,
0.0553,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0603,
0.0025,
0,
0.66,
0.25,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.0653,
0.0025,
0,
0.66,
... | [
"\"\"\"\nMovie module (imdb package).\n\nThis module provides the Movie class, used to store information about\na given movie.\n\nCopyright 2004-2010 Davide Alberani <da@erlug.linux.it>",
"from copy import deepcopy",
"from imdb import articles",
"from imdb.utils import analyze_title, build_title, canonicalTit... |
"""
Person module (imdb package).
This module provides the Person class, used to store information about
a given person.
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_name, build_name, normalizeName, \
flatten, _Container, cmpPeople
class Person(_Container):
"""A Person.
Every information about a person can be accessed as:
personObject['information']
to get a list of the kind of information stored in a
Person object, use the keys() method; some useful aliases
are defined (as "biography" for the "mini biography" key);
see the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main', 'filmography', 'biography')
# Aliases for some not-so-intuitive keys.
keys_alias = {'biography': 'mini biography',
'bio': 'mini biography',
'aka': 'akas',
'also known as': 'akas',
'nick name': 'nick names',
'nicks': 'nick names',
'nickname': 'nick names',
'miscellaneouscrew': 'miscellaneous crew',
'crewmembers': 'miscellaneous crew',
'misc': 'miscellaneous crew',
'guest': 'notable tv guest appearances',
'guests': 'notable tv guest appearances',
'tv guest': 'notable tv guest appearances',
'guest appearances': 'notable tv guest appearances',
'spouses': 'spouse',
'salary': 'salary history',
'salaries': 'salary history',
'otherworks': 'other works',
"maltin's biography":
"biography from leonard maltin's movie encyclopedia",
"leonard maltin's biography":
"biography from leonard maltin's movie encyclopedia",
'real name': 'birth name',
'where are they now': 'where now',
'personal quotes': 'quotes',
'mini-biography author': 'imdb mini-biography by',
'biography author': 'imdb mini-biography by',
'genre': 'genres',
'portrayed': 'portrayed in',
'keys': 'keywords',
'trademarks': 'trade mark',
'trade mark': 'trade mark',
'trade marks': 'trade mark',
'trademark': 'trade mark',
'pictorials': 'pictorial',
'magazine covers': 'magazine cover photo',
'magazine-covers': 'magazine cover photo',
'tv series episodes': 'episodes',
'tv-series episodes': 'episodes',
'articles': 'article',
'keyword': 'keywords'}
# 'nick names'???
keys_tomodify_list = ('mini biography', 'spouse', 'quotes', 'other works',
'salary history', 'trivia', 'trade mark', 'news',
'books', 'biographical movies', 'portrayed in',
'where now', 'interviews', 'article',
"biography from leonard maltin's movie encyclopedia")
cmpFunct = cmpPeople
def _init(self, **kwds):
"""Initialize a Person object.
*personID* -- the unique identifier for the person.
*name* -- the name of the Person, if not in the data dictionary.
*myName* -- the nickname you use for this person.
*myID* -- your personal id for this person.
*data* -- a dictionary used to initialize the object.
*currentRole* -- a Character instance representing the current role
or duty of a person in this movie, or a Person
object representing the actor/actress who played
a given character in a Movie. If a string is
passed, an object is automatically build.
*roleID* -- if available, the characterID/personID of the currentRole
object.
*roleIsPerson* -- when False (default) the currentRole is assumed
to be a Character object, otherwise a Person.
*notes* -- notes about the given person for a specific movie
or role (e.g.: the alias used in the movie credits).
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*modFunct* -- function called returning text fields.
*billingPos* -- position of this person in the credits list.
"""
name = kwds.get('name')
if name and not self.data.has_key('name'):
self.set_name(name)
self.personID = kwds.get('personID', None)
self.myName = kwds.get('myName', u'')
self.billingPos = kwds.get('billingPos', None)
def _reset(self):
"""Reset the Person object."""
self.personID = None
self.myName = u''
self.billingPos = None
def _clear(self):
"""Reset the dictionary."""
self.billingPos = None
def set_name(self, name):
"""Set the name of the person."""
# XXX: convert name to unicode, if it's a plain string?
d = analyze_name(name, canonical=1)
self.data.update(d)
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
addkeys = []
if self.data.has_key('name'):
addkeys += ['canonical name', 'long imdb name',
'long imdb canonical name']
if self.data.has_key('headshot'):
addkeys += ['full-size headshot']
return addkeys
def _getitem(self, key):
"""Handle special keys."""
if self.data.has_key('name'):
if key == 'name':
return normalizeName(self.data['name'])
elif key == 'canonical name':
return self.data['name']
elif key == 'long imdb name':
return build_name(self.data, canonical=0)
elif key == 'long imdb canonical name':
return build_name(self.data)
if key == 'full-size headshot' and self.data.has_key('headshot'):
return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
return None
def getID(self):
"""Return the personID."""
return self.personID
def __nonzero__(self):
"""The Person is "false" if the self.data does not contain a name."""
# XXX: check the name and the personID?
if self.data.has_key('name'): return 1
return 0
def __contains__(self, item):
"""Return true if this Person has worked in the given Movie,
or if the fiven Character was played by this Person."""
from Movie import Movie
from Character import Character
if isinstance(item, Movie):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m):
return 1
elif isinstance(item, Character):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m.currentRole):
return 1
return 0
def isSameName(self, other):
"""Return true if two persons have the same name and imdbIndex
and/or personID.
"""
if not isinstance(other, self.__class__):
return 0
if self.data.has_key('name') and \
other.data.has_key('name') and \
build_name(self.data, canonical=1) == \
build_name(other.data, canonical=1):
return 1
if self.accessSystem == other.accessSystem and \
self.personID and self.personID == other.personID:
return 1
return 0
isSamePerson = isSameName # XXX: just for backward compatiblity.
def __deepcopy__(self, memo):
"""Return a deep copy of a Person instance."""
p = Person(name=u'', personID=self.personID, myName=self.myName,
myID=self.myID, data=deepcopy(self.data, memo),
currentRole=deepcopy(self.currentRole, memo),
roleIsPerson=self._roleIsPerson,
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
p.current_info = list(self.current_info)
p.set_mod_funct(self.modFunct)
p.billingPos = self.billingPos
return p
def __repr__(self):
"""String representation of a Person object."""
# XXX: add also currentRole and notes, if present?
r = '<Person id:%s[%s] name:_%s_>' % (self.personID, self.accessSystem,
self.get('long imdb canonical name'))
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short name."""
return self.get('name', u'').encode('utf_8', 'replace')
def __unicode__(self):
"""Simply print the short title."""
return self.get('name', u'')
def summary(self):
"""Return a string with a pretty-printed summary for the person."""
if not self: return u''
s = u'Person\n=====\nName: %s\n' % \
self.get('long imdb canonical name', u'')
bdate = self.get('birth date')
if bdate:
s += u'Birth date: %s' % bdate
bnotes = self.get('birth notes')
if bnotes:
s += u' (%s)' % bnotes
s += u'.\n'
ddate = self.get('death date')
if ddate:
s += u'Death date: %s' % ddate
dnotes = self.get('death notes')
if dnotes:
s += u' (%s)' % dnotes
s += u'.\n'
bio = self.get('mini biography')
if bio:
s += u'Biography: %s\n' % bio[0]
director = self.get('director')
if director:
d_list = [x.get('long imdb canonical title', u'')
for x in director[:3]]
s += u'Last movies directed: %s.\n' % u'; '.join(d_list)
act = self.get('actor') or self.get('actress')
if act:
a_list = [x.get('long imdb canonical title', u'')
for x in act[:5]]
s += u'Last movies acted: %s.\n' % u'; '.join(a_list)
return s
| [
[
8,
0,
0.0418,
0.08,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0873,
0.0036,
0,
0.66,
0.3333,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.0964,
0.0073,
0,
0.66,
... | [
"\"\"\"\nPerson module (imdb package).\n\nThis module provides the Person class, used to store information about\na given person.\n\nCopyright 2004-2010 Davide Alberani <da@erlug.linux.it>",
"from copy import deepcopy",
"from imdb.utils import analyze_name, build_name, normalizeName, \\\n ... |
"""
Character module (imdb package).
This module provides the Character class, used to store information about
a given character.
Copyright 2007-2010 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_name, build_name, flatten, _Container, cmpPeople
class Character(_Container):
    """A character with its own IMDb entry.

    Every piece of information about a character can be accessed as:
        characterObject['information']
    Use the keys() method to list the stored information; some handy
    aliases are accepted as well (e.g. "also known as" for the "akas"
    key); see the keys_alias dictionary.
    """
    # The default sets of information retrieved.
    default_info = ('main', 'filmography', 'biography')
    # Aliases for some not-so-intuitive keys.
    keys_alias = {'mini biography': 'biography',
                  'bio': 'biography',
                  'character biography': 'biography',
                  'character biographies': 'biography',
                  'biographies': 'biography',
                  'character bio': 'biography',
                  'aka': 'akas',
                  'also known as': 'akas',
                  'alternate names': 'akas',
                  'personal quotes': 'quotes',
                  'keys': 'keywords',
                  'keyword': 'keywords'}
    keys_tomodify_list = ('biography', 'quotes')
    cmpFunct = cmpPeople

    def _init(self, **kwds):
        """Initialize a Character object.

        *characterID* -- the unique identifier for the character.
        *name* -- the name of the Character, if not in the data dictionary.
        *myName* -- the nickname you use for this character.
        *myID* -- your personal id for this character.
        *data* -- a dictionary used to initialize the object.
        *notes* -- notes about the given character.
        *accessSystem* -- a string representing the data access system used.
        *titlesRefs* -- a dictionary with references to movies.
        *namesRefs* -- a dictionary with references to persons.
        *charactersRefs* -- a dictionary with references to characters.
        *modFunct* -- function called returning text fields.
        """
        name = kwds.get('name')
        if name and 'name' not in self.data:
            self.set_name(name)
        self.characterID = kwds.get('characterID', None)
        self.myName = kwds.get('myName', u'')

    def _reset(self):
        """Wipe the Character-specific attributes set by _init()."""
        self.characterID = None
        self.myName = u''

    def set_name(self, name):
        """Parse the given name and merge its components into self.data."""
        # XXX: convert name to unicode, if it's a plain string?
        self.data.update(analyze_name(name, canonical=0))

    def _additional_keys(self):
        """Valid keys to append to the data.keys() list."""
        extras = []
        if 'name' in self.data:
            extras.append('long imdb name')
        if 'headshot' in self.data:
            extras.append('full-size headshot')
        return extras

    def _getitem(self, key):
        """Handle special keys."""
        ## XXX: can a character have an imdbIndex?
        if key == 'long imdb name' and 'name' in self.data:
            return build_name(self.data)
        if key == 'full-size headshot' and 'headshot' in self.data:
            return self._re_fullsizeURL.sub('', self.data.get('headshot', ''))
        return None

    def getID(self):
        """Return the characterID (None when not set)."""
        return self.characterID

    def __nonzero__(self):
        """Truth value: a Character is true only when it has a name."""
        # XXX: check the name and the characterID?
        if self.data.get('name'):
            return 1
        return 0

    def __contains__(self, item):
        """Return true if this Character was portrayed in the given Movie
        or it was impersonated by the given Person."""
        from Movie import Movie
        from Person import Person
        if isinstance(item, Person):
            matches = lambda m: item.isSame(m.currentRole)
        elif isinstance(item, Movie):
            matches = lambda m: item.isSame(m)
        else:
            return 0
        for movie in flatten(self.data, yieldDictKeys=1, scalar=Movie):
            if matches(movie):
                return 1
        return 0

    def isSameName(self, other):
        """Return true if two characters share the same name
        and/or characterID."""
        if not isinstance(other, self.__class__):
            return 0
        if 'name' in self.data and 'name' in other.data:
            if build_name(self.data, canonical=0) == \
                    build_name(other.data, canonical=0):
                return 1
        if self.accessSystem == other.accessSystem and \
                self.characterID is not None and \
                self.characterID == other.characterID:
            return 1
        return 0
    isSameCharacter = isSameName

    def __deepcopy__(self, memo):
        """Return a deep copy of a Character instance."""
        clone = Character(name=u'', characterID=self.characterID,
                          myName=self.myName, myID=self.myID,
                          data=deepcopy(self.data, memo),
                          notes=self.notes, accessSystem=self.accessSystem,
                          titlesRefs=deepcopy(self.titlesRefs, memo),
                          namesRefs=deepcopy(self.namesRefs, memo),
                          charactersRefs=deepcopy(self.charactersRefs, memo))
        clone.current_info = list(self.current_info)
        clone.set_mod_funct(self.modFunct)
        return clone

    def __repr__(self):
        """String representation of a Character object."""
        txt = '<Character id:%s[%s] name:_%s_>' % (self.characterID,
                                                   self.accessSystem,
                                                   self.get('name'))
        if isinstance(txt, unicode):
            txt = txt.encode('utf_8', 'replace')
        return txt

    def __str__(self):
        """Short name, encoded as a utf-8 byte string."""
        short = self.get('name', u'')
        return short.encode('utf_8', 'replace')

    def __unicode__(self):
        """Short name, as a unicode string."""
        return self.get('name', u'')

    def summary(self):
        """Return a string with a pretty-printed summary for the character."""
        if not self:
            return u''
        out = u'Character\n=====\nName: %s\n' % self.get('name', u'')
        bio = self.get('biography')
        if bio:
            out += u'Biography: %s\n' % bio[0]
        filmography = self.get('filmography')
        if filmography:
            titles = [movie.get('long imdb canonical title', u'')
                      for movie in filmography[:5]]
            out += u'Last movies with this character: %s.\n' % \
                   u'; '.join(titles)
        return out
| [
[
8,
0,
0.0584,
0.1117,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1218,
0.0051,
0,
0.66,
0.3333,
739,
0,
1,
0,
0,
739,
0,
0
],
[
1,
0,
0.132,
0.0051,
0,
0.66,... | [
"\"\"\"\nCharacter module (imdb package).\n\nThis module provides the Character class, used to store information about\na given character.\n\nCopyright 2007-2010 Davide Alberani <da@erlug.linux.it>",
"from copy import deepcopy",
"from imdb.utils import analyze_name, build_name, flatten, _Container, cmpPeople",
... |
#-*- encoding: utf-8 -*-
"""
parser.sql.dbschema module (imdb.parser.sql package).
This module provides the schema used to describe the layout of the
database used by the imdb.parser.sql package; functions to create/drop
tables and indexes are also provided.
Copyright 2005-2010 Davide Alberani <da@erlug.linux.it>
2006 Giuseppe "Cowo" Corbelli <cowo --> lugbs.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import logging
# Module-level logger used by the create/drop helper functions below.
_dbschema_logger = logging.getLogger('imdbpy.parser.sql.dbschema')
# Placeholders for column types.
INTCOL = 1       # integer column
UNICODECOL = 2   # unicode text column
STRINGCOL = 3    # plain (byte) string column
# Maps the numeric placeholders back to their symbolic names (used by repr).
_strMap = {1: 'INTCOL', 2: 'UNICODECOL', 3: 'STRINGCOL'}
class DBCol(object):
    """Describe a single column of a database table."""
    def __init__(self, name, kind, **params):
        """Store name and kind (one of INTCOL/UNICODECOL/STRINGCOL);
        'index', 'indexLen' and 'foreignKey' are popped out of params
        and kept as attributes, the rest stays in self.params."""
        self.name = name
        self.kind = kind
        # If not None, two notations are accepted: 'TableName'
        # and 'TableName.ColName'; in the first case, 'id' is assumed
        # as the name of the pointed column.
        self.foreignKey = params.pop('foreignKey', None)
        self.index = params.pop('index', None)
        self.indexLen = params.pop('indexLen', None)
        self.params = params

    def __str__(self):
        """Class representation."""
        out = '<DBCol %s %s' % (self.name, _strMap[self.kind])
        if self.index:
            out += ' INDEX'
        if self.indexLen:
            out += '[:%d]' % self.indexLen
        if self.foreignKey:
            out += ' FOREIGN'
        if 'default' in self.params:
            default = self.params['default']
            if default is not None:
                default = '"%s"' % default
            out += ' DEFAULT=%s' % default
        for param in self.params:
            if param == 'default':
                continue
            out += ' %s' % param.upper()
        return out + '>'

    def __repr__(self):
        """Class representation."""
        out = '<DBCol(name="%s", %s' % (self.name, _strMap[self.kind])
        if self.index:
            out += ', index="%s"' % self.index
        if self.indexLen:
            out += ', indexLen=%d' % self.indexLen
        if self.foreignKey:
            out += ', foreignKey="%s"' % self.foreignKey
        for param in self.params:
            val = self.params[param]
            if isinstance(val, (unicode, str)):
                val = u'"%s"' % val
            out += ', %s=%s' % (param, val)
        return out + ')>'
class DBTable(object):
    """Describe a database table: its name, columns and default rows."""
    def __init__(self, name, *cols, **kwds):
        self.name = name
        self.cols = cols
        # Default values: {'column name': (sequence, of, values)}.
        self.values = kwds.get('values', {})

    def __str__(self):
        """Class representation."""
        nrValues = sum([len(v) for v in self.values.values()])
        return '<DBTable %s (%d cols, %d values)>' % (self.name,
                                                      len(self.cols), nrValues)

    def __repr__(self):
        """Class representation."""
        out = '<DBTable(name="%s"' % self.name
        colsRepr = ', '.join([repr(col).rstrip('>').lstrip('<')
                              for col in self.cols])
        if colsRepr:
            out += ', %s' % colsRepr
        if self.values:
            out += ', values=%s' % self.values
        return out + ')>'
# Default values to insert in some tables: {'column': (list, of, values, ...)}
# NOTE(review): a few spellings below ('LD spaciality', 'intellegibility',
# 'availablility') look wrong but presumably mirror the upstream data
# source — do not "fix" them without checking the plain-text data files.
# Title kinds.
kindTypeDefs = {'kind': ('movie', 'tv series', 'tv movie', 'video movie',
                        'tv mini series', 'video game', 'episode')}
# Company roles.
companyTypeDefs = {'kind': ('distributors', 'production companies',
                        'special effects companies', 'miscellaneous companies')}
# Every kind of textual information attached to titles and persons.
infoTypeDefs = {'info': ('runtimes', 'color info', 'genres', 'languages',
    'certificates', 'sound mix', 'tech info', 'countries', 'taglines',
    'keywords', 'alternate versions', 'crazy credits', 'goofs',
    'soundtrack', 'quotes', 'release dates', 'trivia', 'locations',
    'mini biography', 'birth notes', 'birth date', 'height',
    'death date', 'spouse', 'other works', 'birth name',
    'salary history', 'nick names', 'books', 'agent address',
    'biographical movies', 'portrayed in', 'where now', 'trade mark',
    'interviews', 'article', 'magazine cover photo', 'pictorial',
    'death notes', 'LD disc format', 'LD year', 'LD digital sound',
    'LD official retail price', 'LD frequency response', 'LD pressing plant',
    'LD length', 'LD language', 'LD review', 'LD spaciality', 'LD release date',
    'LD production country', 'LD contrast', 'LD color rendition',
    'LD picture format', 'LD video noise', 'LD video artifacts',
    'LD release country', 'LD sharpness', 'LD dynamic range',
    'LD audio noise', 'LD color information', 'LD group genre',
    'LD quality program', 'LD close captions-teletext-ld-g',
    'LD category', 'LD analog left', 'LD certification',
    'LD audio quality', 'LD video quality', 'LD aspect ratio',
    'LD analog right', 'LD additional information',
    'LD number of chapter stops', 'LD dialogue intellegibility',
    'LD disc size', 'LD master format', 'LD subtitles',
    'LD status of availablility', 'LD quality of source',
    'LD number of sides', 'LD video standard', 'LD supplement',
    'LD original title', 'LD sound encoding', 'LD number', 'LD label',
    'LD catalog number', 'LD laserdisc title', 'screenplay-teleplay',
    'novel', 'adaption', 'book', 'production process protocol',
    'printed media reviews', 'essays', 'other literature', 'mpaa',
    'plot', 'votes distribution', 'votes', 'rating',
    'production dates', 'copyright holder', 'filming dates', 'budget',
    'weekend gross', 'gross', 'opening weekend', 'rentals',
    'admissions', 'studios', 'top 250 rank', 'bottom 10 rank')}
# Completeness status of cast/crew lists.
compCastTypeDefs = {'kind': ('cast', 'crew', 'complete', 'complete+verified')}
# Kinds of movie-to-movie relations.
linkTypeDefs = {'link': ('follows', 'followed by', 'remake of', 'remade as',
                        'references', 'referenced in', 'spoofs', 'spoofed in',
                        'features', 'featured in', 'spin off from', 'spin off',
                        'version of', 'similar to', 'edited into',
                        'edited from', 'alternate language version of',
                        'unknown link')}
# Jobs a person can have in a movie.
roleTypeDefs = {'role': ('actor', 'actress', 'producer', 'writer',
                        'cinematographer', 'composer', 'costume designer',
                        'director', 'editor', 'miscellaneous crew',
                        'production designer', 'guest')}
# Schema of tables in our database.
# XXX: Foreign keys can be used to create constrains between tables,
#      but they create indexes in the database, and this
#      means poor performances at insert-time.
DB_SCHEMA = [
    DBTable('Name',
        # namePcodeCf is the soundex of the name in the canonical format.
        # namePcodeNf is the soundex of the name in the normal format, if
        # different from namePcodeCf.
        # surnamePcode is the soundex of the surname, if different from the
        # other two values.
        # The 'id' column is simply skipped by SQLObject (it's a default);
        # the alternateID attribute here will be ignored by SQLAlchemy.
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
        DBCol('imdbID', INTCOL, default=None),
        DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
                index='idx_pcodecf'),
        DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
                index='idx_pcodenf'),
        DBCol('surnamePcode', STRINGCOL, length=5, default=None,
                index='idx_pcode'),
        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
    ),
    DBTable('CharName',
        # namePcodeNf is the soundex of the name in the normal format.
        # surnamePcode is the soundex of the surname, if different
        # from namePcodeNf.
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
        DBCol('imdbID', INTCOL, default=None),
        DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
                index='idx_pcodenf'),
        DBCol('surnamePcode', STRINGCOL, length=5, default=None,
                index='idx_pcode'),
        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
    ),
    DBTable('CompanyName',
        # namePcodeNf is the soundex of the name in the normal format.
        # namePcodeSf is the soundex of the name plus the country code.
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('name', UNICODECOL, notNone=True, index='idx_name', indexLen=6),
        DBCol('countryCode', UNICODECOL, length=255, default=None),
        DBCol('imdbID', INTCOL, default=None),
        DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
                index='idx_pcodenf'),
        DBCol('namePcodeSf', STRINGCOL, length=5, default=None,
                index='idx_pcodesf'),
        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
    ),
    DBTable('KindType',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('kind', STRINGCOL, length=15, default=None, alternateID=True),
        values=kindTypeDefs
    ),
    DBTable('Title',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('title', UNICODECOL, notNone=True,
                index='idx_title', indexLen=10),
        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
        DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
        DBCol('productionYear', INTCOL, default=None),
        DBCol('imdbID', INTCOL, default=None),
        DBCol('phoneticCode', STRINGCOL, length=5, default=None,
                index='idx_pcode'),
        # episodeOfID points to the series a TV episode belongs to.
        DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
                foreignKey='Title'),
        DBCol('seasonNr', INTCOL, default=None),
        DBCol('episodeNr', INTCOL, default=None),
        # Maximum observed length is 44; 49 can store 5 comma-separated
        # year-year pairs.
        DBCol('seriesYears', STRINGCOL, length=49, default=None),
        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
    ),
    DBTable('CompanyType',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('kind', STRINGCOL, length=32, default=None, alternateID=True),
        values=companyTypeDefs
    ),
    DBTable('AkaName',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('personID', INTCOL, notNone=True, index='idx_person',
                foreignKey='Name'),
        DBCol('name', UNICODECOL, notNone=True),
        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
        DBCol('namePcodeCf', STRINGCOL, length=5, default=None,
                index='idx_pcodecf'),
        DBCol('namePcodeNf', STRINGCOL, length=5, default=None,
                index='idx_pcodenf'),
        DBCol('surnamePcode', STRINGCOL, length=5, default=None,
                index='idx_pcode'),
        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
    ),
    DBTable('AkaTitle',
        # XXX: It's safer to set notNone to False, here.
        # alias for akas are stored completely in the AkaTitle table;
        # this means that episodes will set also a "tv series" alias name.
        # Reading the aka-title.list file it looks like there are
        # episode titles with aliases to different titles for both
        # the episode and the series title, while for just the series
        # there are no aliases.
        # E.g.:
        # aka title          original title
        # "Series, The" (2005) {The Episode}  "Other Title" (2005) {Other Title}
        # But there is no:
        # "Series, The" (2005)           "Other Title" (2005)
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('movieID', INTCOL, notNone=True, index='idx_movieid',
                foreignKey='Title'),
        DBCol('title', UNICODECOL, notNone=True),
        DBCol('imdbIndex', UNICODECOL, length=12, default=None),
        DBCol('kindID', INTCOL, notNone=True, foreignKey='KindType'),
        DBCol('productionYear', INTCOL, default=None),
        DBCol('phoneticCode', STRINGCOL, length=5, default=None,
                index='idx_pcode'),
        DBCol('episodeOfID', INTCOL, default=None, index='idx_epof',
                foreignKey='AkaTitle'),
        DBCol('seasonNr', INTCOL, default=None),
        DBCol('episodeNr', INTCOL, default=None),
        DBCol('note', UNICODECOL, default=None),
        DBCol('md5sum', STRINGCOL, length=32, default=None, index='idx_md5')
    ),
    DBTable('RoleType',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('role', STRINGCOL, length=32, notNone=True, alternateID=True),
        values=roleTypeDefs
    ),
    # Links people to the movies they worked in (with role and billing).
    DBTable('CastInfo',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('personID', INTCOL, notNone=True, index='idx_pid',
                foreignKey='Name'),
        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
                foreignKey='Title'),
        DBCol('personRoleID', INTCOL, default=None, index='idx_cid',
                foreignKey='CharName'),
        DBCol('note', UNICODECOL, default=None),
        DBCol('nrOrder', INTCOL, default=None),
        DBCol('roleID', INTCOL, notNone=True, foreignKey='RoleType')
    ),
    DBTable('CompCastType',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('kind', STRINGCOL, length=32, notNone=True, alternateID=True),
        values=compCastTypeDefs
    ),
    DBTable('CompleteCast',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('movieID', INTCOL, index='idx_mid', foreignKey='Title'),
        DBCol('subjectID', INTCOL, notNone=True, foreignKey='CompCastType'),
        DBCol('statusID', INTCOL, notNone=True, foreignKey='CompCastType')
    ),
    DBTable('InfoType',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('info', STRINGCOL, length=32, notNone=True, alternateID=True),
        values=infoTypeDefs
    ),
    DBTable('LinkType',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('link', STRINGCOL, length=32, notNone=True, alternateID=True),
        values=linkTypeDefs
    ),
    DBTable('Keyword',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        # XXX: can't use alternateID=True, because it would create
        #      a UNIQUE index; unfortunately (at least with a common
        #      collation like utf8_unicode_ci) MySQL will consider
        #      some different keywords identical - like
        #      "fiancée" and "fiancee".
        DBCol('keyword', UNICODECOL, length=255, notNone=True,
                index='idx_keyword', indexLen=5),
        DBCol('phoneticCode', STRINGCOL, length=5, default=None,
                index='idx_pcode')
    ),
    DBTable('MovieKeyword',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
                foreignKey='Title'),
        DBCol('keywordID', INTCOL, notNone=True, index='idx_keywordid',
                foreignKey='Keyword')
    ),
    DBTable('MovieLink',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
                foreignKey='Title'),
        DBCol('linkedMovieID', INTCOL, notNone=True, foreignKey='Title'),
        DBCol('linkTypeID', INTCOL, notNone=True, foreignKey='LinkType')
    ),
    DBTable('MovieInfo',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
                foreignKey='Title'),
        DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
        DBCol('info', UNICODECOL, notNone=True),
        DBCol('note', UNICODECOL, default=None)
    ),
    # This table is identical to MovieInfo, except that both 'infoTypeID'
    # and 'info' are indexed.
    DBTable('MovieInfoIdx',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
                foreignKey='Title'),
        DBCol('infoTypeID', INTCOL, notNone=True, index='idx_infotypeid',
                foreignKey='InfoType'),
        DBCol('info', UNICODECOL, notNone=True, index='idx_info', indexLen=10),
        DBCol('note', UNICODECOL, default=None)
    ),
    DBTable('MovieCompanies',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('movieID', INTCOL, notNone=True, index='idx_mid',
                foreignKey='Title'),
        DBCol('companyID', INTCOL, notNone=True, index='idx_cid',
                foreignKey='CompanyName'),
        DBCol('companyTypeID', INTCOL, notNone=True, foreignKey='CompanyType'),
        DBCol('note', UNICODECOL, default=None)
    ),
    DBTable('PersonInfo',
        DBCol('id', INTCOL, notNone=True, alternateID=True),
        DBCol('personID', INTCOL, notNone=True, index='idx_pid',
                foreignKey='Name'),
        DBCol('infoTypeID', INTCOL, notNone=True, foreignKey='InfoType'),
        DBCol('info', UNICODECOL, notNone=True),
        DBCol('note', UNICODECOL, default=None)
    )
]
# Functions to manage tables.
def dropTables(tables, ifExists=True):
    """Drop the tables."""
    # In reverse order (useful to avoid errors about foreign keys).
    for table in reversed(list(tables)):
        _dbschema_logger.info('dropping table %s', table._imdbpyName)
        table.dropTable(ifExists)
def createTables(tables, ifNotExists=True):
    """Create the tables and insert default values."""
    for table in tables:
        # Create the table.
        _dbschema_logger.info('creating table %s', table._imdbpyName)
        table.createTable(ifNotExists)
        # Insert default values, if any.
        if not table._imdbpySchema.values:
            continue
        _dbschema_logger.info('inserting values into table %s',
                              table._imdbpyName)
        for column, values in table._imdbpySchema.values.items():
            for value in values:
                table(**{column: unicode(value)})
def createIndexes(tables, ifNotExists=True):
    """Create the indexes in the database."""
    for tbl in tables:
        _dbschema_logger.info('creating indexes for table %s',
                              tbl._imdbpyName)
        tbl.addIndexes(ifNotExists)
def createForeignKeys(tables, ifNotExists=True):
    """Create Foreign Keys."""
    # Map table names to table objects, so every table can resolve
    # the targets of its foreign keys.
    mapTables = dict([(tbl._imdbpyName, tbl) for tbl in tables])
    for tbl in tables:
        _dbschema_logger.info('creating foreign keys for table %s',
                              tbl._imdbpyName)
        tbl.addForeignKeys(mapTables, ifNotExists)
| [
[
8,
0,
0.0293,
0.0521,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0586,
0.0022,
0,
0.66,
0.0526,
715,
0,
1,
0,
0,
715,
0,
0
],
[
14,
0,
0.0629,
0.0022,
0,
0.6... | [
"\"\"\"\nparser.sql.dbschema module (imdb.parser.sql package).\n\nThis module provides the schema used to describe the layout of the\ndatabase used by the imdb.parser.sql package; functions to create/drop\ntables and indexes are also provided.\n\nCopyright 2005-2010 Davide Alberani <da@erlug.linux.it>",
"import l... |
"""
parser.http.movieParser module (imdb package).
This module provides the classes (and the instances), used to parse the
IMDb pages on the akas.imdb.com server about a movie.
E.g., for Brian De Palma's "The Untouchables", the referred
pages would be:
combined details: http://akas.imdb.com/title/tt0094226/combined
plot summary: http://akas.imdb.com/title/tt0094226/plotsummary
...and so on...
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
import urllib
from imdb import imdbURL_base
from imdb.Person import Person
from imdb.Movie import Movie
from imdb.Company import Company
from imdb.utils import analyze_title, split_company_name_notes, _Container
from utils import build_person, DOMParserBase, Attribute, Extractor, \
analyze_imdbid
# Dictionary used to convert some section's names.
# NOTE: keys are runtime data (they must match the lowercased section
# titles found in the HTML pages); don't change their spelling.
# Fix: the original literal listed 'production managers' twice with the
# same value; the duplicate entry has been removed (no behavior change).
_SECT_CONV = {
        'directed': 'director',
        'directed by': 'director',
        'directors': 'director',
        'editors': 'editor',
        'writing credits': 'writer',
        'writers': 'writer',
        'produced': 'producer',
        'cinematography': 'cinematographer',
        'film editing': 'editor',
        'casting': 'casting director',
        'costume design': 'costume designer',
        'makeup department': 'make up',
        'production management': 'production manager',
        'second unit director or assistant director': 'assistant director',
        'costume and wardrobe department': 'costume department',
        'sound department': 'sound crew',
        'stunts': 'stunt performer',
        'other crew': 'miscellaneous crew',
        'also known as': 'akas',
        'country': 'countries',
        'runtime': 'runtimes',
        'language': 'languages',
        'certification': 'certificates',
        'genre': 'genres',
        'created': 'creator',
        'creators': 'creator',
        'color': 'color info',
        'plot': 'plot outline',
        'seasons': 'number of seasons',
        'art directors': 'art direction',
        'assistant directors': 'assistant director',
        'set decorators': 'set decoration',
        'visual effects department': 'visual effects',
        'production managers': 'production manager',
        'miscellaneous': 'miscellaneous crew',
        'make up department': 'make up',
        'plot summary': 'plot outline',
        'cinematographers': 'cinematographer',
        'camera department': 'camera and electrical department',
        'costume designers': 'costume designer',
        'production designers': 'production design',
        'music original': 'original music',
        'casting directors': 'casting director',
        'other companies': 'miscellaneous companies',
        'producers': 'producer',
        'special effects by': 'special effects department',
        'special effects': 'special effects companies'
}
def _manageRoles(mo):
    """Perform some transformation on the html, so that roleIDs can
    be easily retrieved."""
    head = mo.group(1)
    tagged = []
    for role in mo.group(2).split(' / '):
        role = role.strip()
        if not role:
            continue
        imdbID = analyze_imdbid(role)
        if imdbID is None:
            imdbID = u'/'
        else:
            imdbID += u'/'
        tagged.append(u'<div class="_imdbpyrole" roleid="%s">%s</div>' % \
                      (imdbID, role))
    return head + u' / '.join(tagged) + mo.group(3)
# Matches the "char" cell of a cast-list row; used with _manageRoles to
# tag every role with its roleID.
_reRolesMovie = re.compile(r'(<td class="char">)(.*?)(</td>)',
                            re.I | re.M | re.S)
def _replaceBR(mo):
    """Replaces <br> tags with '::' (useful for some akas)"""
    return mo.group(0).replace('<br>', '::')
# The "also known as" section; its matches are rewritten by _replaceBR.
_reAkas = re.compile(r'<h5>also known as:</h5>.*?</div>', re.I | re.M | re.S)
def makeSplitter(lstrip=None, sep='|', comments=True,
                origNotesSep=' (', newNotesSep='::(', strip=None):
    """Return a splitter function suitable for a given set of data."""
    def splitter(x):
        if not x:
            return x
        x = x.strip()
        if not x:
            return x
        if lstrip is not None:
            x = x.lstrip(lstrip).lstrip()
        # Split, trim every piece and drop the empty ones.
        pieces = [piece.strip() for piece in x.split(sep)]
        pieces = [piece for piece in pieces if piece]
        if comments:
            pieces = [piece.replace(origNotesSep, newNotesSep, 1)
                      for piece in pieces]
        if strip:
            pieces = [piece.strip(strip) for piece in pieces]
        return pieces
    return splitter
def _toInt(val, replace=()):
    """Return the value, converted to integer, or None; if present, 'replace'
    must be a list of tuples of values to replace."""
    for old, new in replace:
        val = val.replace(old, new)
    try:
        return int(val)
    except (TypeError, ValueError):
        return None
class DOMHTMLMovieParser(DOMParserBase):
    """Parser for the "combined details" (and if instance.mdparse is
    True also for the "main details") page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        mparser = DOMHTMLMovieParser()
        result = mparser.parse(combined_details_html_string)
    """
    # Extracted values include Person/Movie/Company objects, so the
    # base class must propagate access system and modFunct to them.
    _containsObjects = True
    extractors = [Extractor(label='title',
                                path="//h1",
                                attrs=Attribute(key='title',
                                        path=".//text()",
                                        postprocess=analyze_title)),
                # Crew sections are anchored by <a class="glossary"> names
                # (e.g. "directors", "writers"); each becomes a dict key.
                Extractor(label='glossarysections',
                        group="//a[@class='glossary']",
                        group_key="./@name",
                        group_key_normalize=lambda x: x.replace('_', ' '),
                        path="../../../..//tr",
                        attrs=Attribute(key=None,
                            multi=True,
                            path={'person': ".//text()",
                                'link': "./td[1]/a[@href]/@href"},
                            postprocess=lambda x: \
                                    build_person(x.get('person') or u'',
                                        personID=analyze_imdbid(x.get('link')))
                        )),
                # The roleid attribute read here is injected by the
                # _manageRoles preprocessor (see preprocessors below).
                Extractor(label='cast',
                        path="//table[@class='cast']//tr",
                        attrs=Attribute(key="cast",
                            multi=True,
                            path={'person': ".//text()",
                                'link': "td[2]/a/@href",
                                'roleID': \
                                    "td[4]/div[@class='_imdbpyrole']/@roleid"},
                            postprocess=lambda x: \
                                    build_person(x.get('person') or u'',
                                        personID=analyze_imdbid(x.get('link')),
                                        roleID=(x.get('roleID') or u'').split('/'))
                        )),
                Extractor(label='genres',
                        path="//div[@class='info']//a[starts-with(@href," \
                                " '/Sections/Genres')]",
                        attrs=Attribute(key="genres",
                            multi=True,
                            path="./text()")),
                # Each <h5> header in the "info" divs maps to one key;
                # the postprocess functions clean/split the raw text.
                Extractor(label='h5sections',
                        path="//div[@class='info']/h5/..",
                        attrs=[
                            Attribute(key="plot summary",
                                path="./h5[starts-with(text(), " \
                                        "'Plot:')]/../div/text()",
                                postprocess=lambda x: \
                                        x.strip().rstrip('|').rstrip()),
                            Attribute(key="aspect ratio",
                                path="./h5[starts-with(text()," \
                                        " 'Aspect')]/../div/text()",
                                postprocess=lambda x: x.strip()),
                            Attribute(key="mpaa",
                                path="./h5/a[starts-with(text()," \
                                        " 'MPAA')]/../../div/text()",
                                postprocess=lambda x: x.strip()),
                            Attribute(key="countries",
                                path="./h5[starts-with(text(), " \
                            "'Countr')]/../div[@class='info-content']//text()",
                                postprocess=makeSplitter('|')),
                            Attribute(key="language",
                                path="./h5[starts-with(text(), " \
                                        "'Language')]/..//text()",
                                    postprocess=makeSplitter('Language:')),
                            Attribute(key='color info',
                                path="./h5[starts-with(text(), " \
                                        "'Color')]/..//text()",
                                postprocess=makeSplitter('Color:')),
                            Attribute(key='sound mix',
                                path="./h5[starts-with(text(), " \
                                        "'Sound Mix')]/..//text()",
                                postprocess=makeSplitter('Sound Mix:')),
                            # Collects akas not encosed in <i> tags.
                            Attribute(key='other akas',
                                path="./h5[starts-with(text(), " \
                                        "'Also Known As')]/../div//text()",
                                postprocess=makeSplitter(sep='::',
                                                origNotesSep='" - ',
                                                newNotesSep='::',
                                                strip='"')),
                            Attribute(key='runtimes',
                                path="./h5[starts-with(text(), " \
                                        "'Runtime')]/../div/text()",
                                postprocess=makeSplitter()),
                            Attribute(key='certificates',
                                path="./h5[starts-with(text(), " \
                                        "'Certificat')]/..//text()",
                                postprocess=makeSplitter('Certification:')),
                            Attribute(key='number of seasons',
                                path="./h5[starts-with(text(), " \
                                        "'Seasons')]/..//text()",
                                postprocess=lambda x: x.count('|') + 1),
                            Attribute(key='original air date',
                                path="./h5[starts-with(text(), " \
                                        "'Original Air Date')]/../div/text()"),
                            Attribute(key='tv series link',
                                path="./h5[starts-with(text(), " \
                                        "'TV Series')]/..//a/@href"),
                            Attribute(key='tv series title',
                                path="./h5[starts-with(text(), " \
                                        "'TV Series')]/..//a/text()")
                            ]),
                Extractor(label='creator',
                        path="//h5[starts-with(text(), 'Creator')]/..//a",
                        attrs=Attribute(key='creator', multi=True,
                                        path={'name': "./text()",
                                            'link': "./@href"},
                                        postprocess=lambda x: \
                                            build_person(x.get('name') or u'',
                                                personID=analyze_imdbid(x.get('link')))
                                        )),
                # "thin" writer/director are fallbacks, promoted to
                # 'writer'/'director' in postprocess_data when missing.
                Extractor(label='thin writer',
                        path="//h5[starts-with(text(), 'Writer')]/..//a",
                        attrs=Attribute(key='thin writer', multi=True,
                                        path={'name': "./text()",
                                            'link': "./@href"},
                                        postprocess=lambda x: \
                                            build_person(x.get('name') or u'',
                                                personID=analyze_imdbid(x.get('link')))
                                        )),
                Extractor(label='thin director',
                        path="//h5[starts-with(text(), 'Director')]/..//a",
                        attrs=Attribute(key='thin director', multi=True,
                                        path={'name': "./text()",
                                            'link': "@href"},
                                        postprocess=lambda x: \
                                            build_person(x.get('name') or u'',
                                                personID=analyze_imdbid(x.get('link')))
                                        )),
                Extractor(label='top 250/bottom 100',
                        path="//div[@class='starbar-special']/" \
                                "a[starts-with(@href, '/chart/')]",
                        attrs=Attribute(key='top/bottom rank',
                                        path="./text()")),
                Extractor(label='series years',
                        path="//div[@id='tn15title']//span" \
                            "[starts-with(text(), 'TV series')]",
                        attrs=Attribute(key='series years',
                                        path="./text()",
                                        postprocess=lambda x: \
                                                x.replace('TV series','').strip())),
                Extractor(label='number of episodes',
                        path="//a[@title='Full Episode List']",
                        attrs=Attribute(key='number of episodes',
                                        path="./text()",
                                        postprocess=lambda x: \
                                                _toInt(x, [(' Episodes', '')]))),
                Extractor(label='akas',
                        path="//i[@class='transl']",
                        attrs=Attribute(key='akas', multi=True, path='text()',
                                postprocess=lambda x:
                                x.replace(' ', ' ').rstrip('-').replace('" - ',
                                    '"::', 1).strip('"').replace(' ', ' '))),
                Extractor(label='production notes/status',
                        path="//div[@class='info inprod']",
                        attrs=Attribute(key='production notes',
                                path=".//text()",
                                postprocess=lambda x: x.strip())),
                # Companies sections (production, distributors, ...).
                Extractor(label='blackcatheader',
                        group="//b[@class='blackcatheader']",
                        group_key="./text()",
                        group_key_normalize=lambda x: x.lower(),
                        path="../ul/li",
                        attrs=Attribute(key=None,
                                multi=True,
                                path={'name': "./a//text()",
                                    'comp-link': "./a/@href",
                                    'notes': "./text()"},
                                postprocess=lambda x: \
                                        Company(name=x.get('name') or u'',
                                companyID=analyze_imdbid(x.get('comp-link')),
                                notes=(x.get('notes') or u'').strip())
                            )),
                Extractor(label='rating',
                        path="//div[@class='starbar-meta']/b",
                        attrs=Attribute(key='rating',
                                        path=".//text()")),
                Extractor(label='votes',
                        path="//div[@class='starbar-meta']/a[@href]",
                        attrs=Attribute(key='votes',
                                        path=".//text()")),
                Extractor(label='cover url',
                        path="//a[@name='poster']",
                        attrs=Attribute(key='cover url',
                                        path="./img/@src"))
                ]
    # Raw HTML fixups applied before DOM parsing; see _reRolesMovie
    # and _reAkas above for the regex-based ones.
    preprocessors = [
        (re.compile(r'(<b class="blackcatheader">.+?</b>)', re.I),
            r'</div><div>\1'),
        ('<small>Full cast and crew for<br></small>', ''),
        ('<td> </td>', '<td>...</td>'),
        ('<span class="tv-extra">TV mini-series</span>',
            '<span class="tv-extra">(mini)</span>'),
        (_reRolesMovie, _manageRoles),
        (_reAkas, _replaceBR)]
    def preprocess_dom(self, dom):
        # Handle series information.
        xpath = self.xpath(dom, "//b[text()='Series Crew']")
        if xpath:
            b = xpath[-1] # In doubt, take the last one.
            for a in self.xpath(b, "./following::h5/a[@class='glossary']"):
                name = a.get('name')
                if name:
                    a.set('name', 'series %s' % name)
        # Remove links to IMDbPro.
        for proLink in self.xpath(dom, "//span[@class='pro-link']"):
            proLink.drop_tree()
        # Remove some 'more' links (keep others, like the one around
        # the number of votes).
        for tn15more in self.xpath(dom,
                    "//a[@class='tn15more'][starts-with(@href, '/title/')]"):
            tn15more.drop_tree()
        return dom
    # Helpers for postprocess_data: collapse runs of whitespace and
    # parse "(season X, episode Y)" out of the original air date.
    re_space = re.compile(r'\s+')
    re_airdate = re.compile(r'(.*)\s*\(season (\d+), episode (\d+)\)', re.I)
    def postprocess_data(self, data):
        # Convert section names.
        for sect in data.keys():
            if sect in _SECT_CONV:
                data[_SECT_CONV[sect]] = data[sect]
                del data[sect]
                sect = _SECT_CONV[sect]
        # Filter out fake values.
        for key in data:
            value = data[key]
            if isinstance(value, list) and value:
                if isinstance(value[0], Person):
                    data[key] = filter(lambda x: x.personID is not None, value)
                if isinstance(value[0], _Container):
                    for obj in data[key]:
                        obj.accessSystem = self._as
                        obj.modFunct = self._modFunct
        # Merge the two aka sources under a single 'akas' key.
        if 'akas' in data or 'other akas' in data:
            akas = data.get('akas') or []
            other_akas = data.get('other akas') or []
            akas += other_akas
            if 'akas' in data:
                del data['akas']
            if 'other akas' in data:
                del data['other akas']
            if akas:
                data['akas'] = akas
        if 'runtimes' in data:
            data['runtimes'] = [x.replace(' min', u'')
                                for x in data['runtimes']]
        if 'production notes' in data:
            # The raw text alternates "Key:\nValue" lines; pair them up.
            pn = data['production notes'].replace('\n\nComments:',
                                '\nComments:').replace('\n\nNote:',
                                '\nNote:').replace('Note:\n\n',
                                'Note:\n').split('\n')
            for k, v in zip(pn[::2], pn[1::2]):
                v = v.strip()
                if not v:
                    continue
                k = k.lower().strip(':')
                if k == 'note':
                    k = 'status note'
                data[k] = v
            del data['production notes']
        if 'original air date' in data:
            oid = self.re_space.sub(' ', data['original air date']).strip()
            data['original air date'] = oid
            aid = self.re_airdate.findall(oid)
            if aid and len(aid[0]) == 3:
                date, season, episode = aid[0]
                date = date.strip()
                try: season = int(season)
                except: pass
                try: episode = int(episode)
                except: pass
                if date and date != '????':
                    data['original air date'] = date
                else:
                    del data['original air date']
                # Handle also "episode 0".
                if season or type(season) is type(0):
                    data['season'] = season
                if episode or type(season) is type(0):
                    data['episode'] = episode
        # Promote the "thin" fallbacks when no full crew list exists.
        for k in ('writer', 'director'):
            t_k = 'thin %s' % k
            if t_k not in data:
                continue
            if k not in data:
                data[k] = data[t_k]
            del data[t_k]
        if 'top/bottom rank' in data:
            tbVal = data['top/bottom rank'].lower()
            if tbVal.startswith('top'):
                tbKey = 'top 250 rank'
                tbVal = _toInt(tbVal, [('top 250: #', '')])
            else:
                tbKey = 'bottom 100 rank'
                tbVal = _toInt(tbVal, [('bottom 100: #', '')])
            if tbVal:
                data[tbKey] = tbVal
            del data['top/bottom rank']
        if 'year' in data and data['year'] == '????':
            del data['year']
        if 'tv series link' in data:
            if 'tv series title' in data:
                data['episode of'] = Movie(title=data['tv series title'],
                                            movieID=analyze_imdbid(
                                                    data['tv series link']),
                                            accessSystem=self._as,
                                            modFunct=self._modFunct)
                del data['tv series title']
            del data['tv series link']
        if 'rating' in data:
            try:
                data['rating'] = float(data['rating'].replace('/10', ''))
            except (TypeError, ValueError):
                pass
        if 'votes' in data:
            try:
                votes = data['votes'].replace(',', '').replace('votes', '')
                data['votes'] = int(votes)
            except (TypeError, ValueError):
                pass
        return data
def _process_plotsummary(x):
"""Process a plot (contributed by Rdian06)."""
xauthor = x.get('author')
if xauthor:
xauthor = xauthor.replace('{', '<').replace('}', '>').replace('(',
'<').replace(')', '>').strip()
xplot = x.get('plot', u'').strip()
if xauthor:
xplot += u'::%s' % xauthor
return xplot
class DOMHTMLPlotParser(DOMParserBase):
    """Parser for the "plot summary" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a 'plot' key, containing a list
    of string with the structure: 'summary::summary_author <author@email>'.
    Example:
        pparser = HTMLPlotParser()
        result = pparser.parse(plot_summary_html_string)
    """
    # Plot texts may contain title/name references to be resolved.
    _defGetRefs = True
    # Notice that recently IMDb started to put the email of the
    # author only in the link, that we're not collecting, here.
    extractors = [Extractor(label='plot',
                            path="//p[@class='plotpar']",
                            attrs=Attribute(key='plot',
                                            multi=True,
                                            path={'plot': './text()',
                                                  'author': './i/a/text()'},
                                            postprocess=_process_plotsummary))]
def _process_award(x):
award = {}
award['award'] = x.get('award').strip()
if not award['award']:
return {}
award['year'] = x.get('year').strip()
if award['year'] and award['year'].isdigit():
award['year'] = int(award['year'])
award['result'] = x.get('result').strip()
category = x.get('category').strip()
if category:
award['category'] = category
received_with = x.get('with')
if received_with is not None:
award['with'] = received_with.strip()
notes = x.get('notes')
if notes is not None:
notes = notes.strip()
if notes:
award['notes'] = notes
award['anchor'] = x.get('anchor')
return award
class DOMHTMLAwardsParser(DOMParserBase):
    """Parser for the "awards" page of a given person or movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        awparser = HTMLAwardsParser()
        result = awparser.parse(awards_html_string)
    """
    # 'title' when parsing a movie's awards, 'name' for a person's.
    subject = 'title'
    _containsObjects = True
    # Two passes over the same groups: the first collects awards, the
    # second collects recipients; they are joined on 'anchor' in
    # postprocess_data.
    extractors = [
        Extractor(label='awards',
            group="//table//big",
            group_key="./a",
            path="./ancestor::tr[1]/following-sibling::tr/" \
                    "td[last()][not(@colspan)]",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'year': "../td[1]/a/text()",
                    'result': "../td[2]/b/text()",
                    'award': "../td[3]/text()",
                    'category': "./text()[1]",
                    # FIXME: takes only the first co-recipient
                    'with': "./small[starts-with(text()," \
                            " 'Shared with:')]/following-sibling::a[1]/text()",
                    'notes': "./small[last()]//text()",
                    'anchor': ".//text()"
                    },
                postprocess=_process_award
                )),
        Extractor(label='recipients',
            group="//table//big",
            group_key="./a",
            path="./ancestor::tr[1]/following-sibling::tr/" \
                    "td[last()]/small[1]/preceding-sibling::a",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'name': "./text()",
                    'link': "./@href",
                    'anchor': "..//text()"
                    }
                ))
    ]
    # Fix the markup so that every award row lives in its own table.
    preprocessors = [
        (re.compile('(<tr><td[^>]*>.*?</td></tr>\n\n</table>)', re.I),
         r'\1</table>'),
        (re.compile('(<tr><td[^>]*>\n\n<big>.*?</big></td></tr>)', re.I),
         r'</table><table class="_imdbpy">\1'),
        (re.compile('(<table[^>]*>\n\n)</table>(<table)', re.I), r'\1\2'),
        (re.compile('(<small>.*?)<br>(.*?</small)', re.I), r'\1 \2'),
        (re.compile('(</tr>\n\n)(<td)', re.I), r'\1<tr>\2')
        ]
    def preprocess_dom(self, dom):
        """Repeat td elements according to their rowspan attributes
        in subsequent tr elements.
        """
        cols = self.xpath(dom, "//td[@rowspan]")
        for col in cols:
            span = int(col.get('rowspan'))
            del col.attrib['rowspan']
            position = len(self.xpath(col, "./preceding-sibling::td"))
            row = col.getparent()
            for tr in self.xpath(row, "./following-sibling::tr")[:span-1]:
                # if not cloned, child will be moved to new parent
                clone = self.clone(col)
                # XXX: beware that here we don't use an "adapted" function,
                #      because both BeautifulSoup and lxml uses the same
                #      "insert" method.
                tr.insert(position, clone)
        return dom
    def postprocess_data(self, data):
        if len(data) == 0:
            return {}
        nd = []
        for key in data.keys():
            # The group key is a DOM fragment; its text is the assigner.
            dom = self.get_dom(key)
            assigner = self.xpath(dom, "//a/text()")[0]
            for entry in data[key]:
                if not entry.has_key('name'):
                    if not entry:
                        continue
                    # this is an award, not a recipient
                    entry['assigner'] = assigner.strip()
                    # find the recipients
                    matches = [p for p in data[key]
                               if p.has_key('name') and (entry['anchor'] ==
                                   p['anchor'])]
                    if self.subject == 'title':
                        recipients = [Person(name=recipient['name'],
                                    personID=analyze_imdbid(recipient['link']))
                                    for recipient in matches]
                        entry['to'] = recipients
                    elif self.subject == 'name':
                        recipients = [Movie(title=recipient['name'],
                                    movieID=analyze_imdbid(recipient['link']))
                                    for recipient in matches]
                        entry['for'] = recipients
                    nd.append(entry)
                del entry['anchor']
        return {'awards': nd}
class DOMHTMLTaglinesParser(DOMParserBase):
    """Parser for the "taglines" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        tparser = DOMHTMLTaglinesParser()
        result = tparser.parse(taglines_html_string)
    """
    # Each <p> in the content div is one tagline.
    extractors = [Extractor(label='taglines',
                            path="//div[@id='tn15content']/p",
                            attrs=Attribute(key='taglines', multi=True,
                                            path="./text()"))]
class DOMHTMLKeywordsParser(DOMParserBase):
    """Parser for the "keywords" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        kwparser = DOMHTMLKeywordsParser()
        result = kwparser.parse(keywords_html_string)
    """
    # Keywords are normalized to lowercase, dash-separated form.
    extractors = [Extractor(label='keywords',
                            path="//a[starts-with(@href, '/keyword/')]",
                            attrs=Attribute(key='keywords',
                                            path="./text()", multi=True,
                                            postprocess=lambda x: \
                                                x.lower().replace(' ', '-')))]
class DOMHTMLAlternateVersionsParser(DOMParserBase):
    """Parser for the "alternate versions" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        avparser = HTMLAlternateVersionsParser()
        result = avparser.parse(alternateversions_html_string)
    """
    # Entries may contain title/name references to be resolved.
    _defGetRefs = True
    extractors = [Extractor(label='alternate versions',
                            path="//ul[@class='trivia']/li",
                            attrs=Attribute(key='alternate versions',
                                            multi=True,
                                            path=".//text()",
                                            postprocess=lambda x: x.strip()))]
class DOMHTMLTriviaParser(DOMParserBase):
    """Parser for the "trivia" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        tparser = DOMHTMLTriviaParser()
        result = tparser.parse(trivia_html_string)
    """
    _defGetRefs = True
    # NOTE: the extractor label was copy-pasted from the alternate
    # versions parser; the collected key is 'trivia'.
    extractors = [Extractor(label='alternate versions',
                            path="//div[@class='sodatext']",
                            attrs=Attribute(key='trivia',
                                            multi=True,
                                            path=".//text()",
                                            postprocess=lambda x: x.strip()))]
    def preprocess_dom(self, dom):
        # Remove "link this quote" links.
        for qLink in self.xpath(dom, "//span[@class='linksoda']"):
            qLink.drop_tree()
        return dom
class DOMHTMLSoundtrackParser(DOMHTMLAlternateVersionsParser):
    # Reuses the alternate-versions extractors; each extracted entry
    # is a song title followed by credit lines joined with newlines.
    kind = 'soundtrack'
    preprocessors = [
        ('<br>', '\n')
        ]
    def postprocess_data(self, data):
        if 'soundtrack' in data:
            nd = []
            for x in data['soundtrack']:
                ds = x.split('\n')
                title = ds[0]
                if title[0] == '"' and title[-1] == '"':
                    title = title[1:-1]
                nds = []
                newData = {}
                # Group continuation lines with the credit line they
                # belong to: a new credit starts with a known marker.
                for l in ds[1:]:
                    if ' with ' in l or ' by ' in l or ' from ' in l \
                            or ' of ' in l or l.startswith('From '):
                        nds.append(l)
                    else:
                        if nds:
                            nds[-1] += l
                        else:
                            nds.append(l)
                newData[title] = {}
                for l in nds:
                    skip = False
                    # 'From ' at the start of the line is a special case
                    # (note: the kind computed here is the empty string).
                    for sep in ('From ',):
                        if l.startswith(sep):
                            fdix = len(sep)
                            kind = l[:fdix].rstrip().lower()
                            info = l[fdix:].lstrip()
                            newData[title][kind] = info
                            skip = True
                    if not skip:
                        # Otherwise key the credit on the text before
                        # (and including) the first marker found.
                        for sep in ' with ', ' by ', ' from ', ' of ':
                            fdix = l.find(sep)
                            if fdix != -1:
                                fdix = fdix+len(sep)
                                kind = l[:fdix].rstrip().lower()
                                info = l[fdix:].lstrip()
                                newData[title][kind] = info
                                break
                nd.append(newData)
            data['soundtrack'] = nd
        return data
class DOMHTMLCrazyCreditsParser(DOMParserBase):
    """Parser for the "crazy credits" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        ccparser = DOMHTMLCrazyCreditsParser()
        result = ccparser.parse(crazycredits_html_string)
    """
    _defGetRefs = True
    # Newlines and double spaces are collapsed to single spaces.
    extractors = [Extractor(label='crazy credits', path="//ul/li/tt",
                            attrs=Attribute(key='crazy credits', multi=True,
                                            path=".//text()",
                                            postprocess=lambda x: \
                                                x.replace('\n', ' ').replace(' ', ' ')))]
class DOMHTMLGoofsParser(DOMParserBase):
    """Parser for the "goofs" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        gparser = DOMHTMLGoofsParser()
        result = gparser.parse(goofs_html_string)
    """
    _defGetRefs = True
    extractors = [Extractor(label='goofs', path="//ul[@class='trivia']/li",
                    attrs=Attribute(key='goofs', multi=True, path=".//text()",
                        postprocess=lambda x: (x or u'').strip()))]
class DOMHTMLQuotesParser(DOMParserBase):
    """Parser for the "memorable quotes" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        qparser = DOMHTMLQuotesParser()
        result = qparser.parse(quotes_html_string)
    """
    _defGetRefs = True
    extractors = [
        Extractor(label='quotes',
            path="//div[@class='_imdbpy']",
            attrs=Attribute(key='quotes',
                multi=True,
                path=".//text()",
                postprocess=lambda x: x.strip().replace(' \n',
                            '::').replace('::\n', '::').replace('\n', ' ')))
        ]
    # Wrap each quote in a div so it can be selected with one XPath;
    # the divs are opened at every quote anchor and closed at the
    # separators below.
    preprocessors = [
        (re.compile('(<a name="?qt[0-9]{7}"?></a>)', re.I),
            r'\1<div class="_imdbpy">'),
        (re.compile('<hr width="30%">', re.I), '</div>'),
        (re.compile('<hr/>', re.I), '</div>'),
        (re.compile('<script.*?</script>', re.I|re.S), ''),
        # For BeautifulSoup.
        (re.compile('<!-- sid: t-channel : MIDDLE_CENTER -->', re.I), '</div>')
        ]
    def preprocess_dom(self, dom):
        # Remove "link this quote" links.
        for qLink in self.xpath(dom, "//p[@class='linksoda']"):
            qLink.drop_tree()
        return dom
    def postprocess_data(self, data):
        if 'quotes' not in data:
            return {}
        # Each quote becomes a list of lines (one per speaker).
        for idx, quote in enumerate(data['quotes']):
            data['quotes'][idx] = quote.split('::')
        return data
class DOMHTMLReleaseinfoParser(DOMParserBase):
    """Parser for the "release dates" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        rdparser = DOMHTMLReleaseinfoParser()
        result = rdparser.parse(releaseinfo_html_string)
    """
    extractors = [Extractor(label='release dates',
                    path="//th[@class='xxxx']/../../tr",
                    attrs=Attribute(key='release dates', multi=True,
                        path={'country': ".//td[1]//text()",
                            'date': ".//td[2]//text()",
                            'notes': ".//td[3]//text()"})),
                Extractor(label='akas',
                    path="//div[@class='_imdbpy_akas']/table/tr",
                    attrs=Attribute(key='akas', multi=True,
                        path={'title': "./td[1]/text()",
                            'countries': "./td[2]/text()"}))]
    # Wrap the akas table in a recognizable div for the extractor above.
    preprocessors = [
        (re.compile('(<h5><a name="?akas"?.*</table>)', re.I | re.M | re.S),
            r'<div class="_imdbpy_akas">\1</div>')]
    def postprocess_data(self, data):
        if not ('release dates' in data or 'akas' in data): return data
        # Flatten release dates into 'country::date[notes]' strings.
        releases = data.get('release dates') or []
        rl = []
        for i in releases:
            country = i.get('country')
            date = i.get('date')
            if not (country and date): continue
            country = country.strip()
            date = date.strip()
            if not (country and date): continue
            notes = i['notes']
            info = u'%s::%s' % (country, date)
            if notes:
                info += notes
            rl.append(info)
        if releases:
            del data['release dates']
        if rl:
            data['release dates'] = rl
        # Flatten akas into 'title::country' strings (one per country).
        akas = data.get('akas') or []
        nakas = []
        for aka in akas:
            title = aka.get('title', '').strip()
            if not title:
                continue
            countries = aka.get('countries', '').split('/')
            if not countries:
                nakas.append(title)
            else:
                for country in countries:
                    nakas.append('%s::%s' % (title, country.strip()))
        if akas:
            del data['akas']
        if nakas:
            data['akas from release info'] = nakas
        return data
class DOMHTMLRatingsParser(DOMParserBase):
    """Parser for the "user ratings" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        rparser = DOMHTMLRatingsParser()
        result = rparser.parse(userratings_html_string)
    """
    # Parses "mean = X.Y. median = Z" out of the arithmetic-mean line.
    re_means = re.compile('mean\s*=\s*([0-9]\.[0-9])\.\s*median\s*=\s*([0-9])',
                          re.I)
    extractors = [
        Extractor(label='number of votes',
            path="//td[b='Percentage']/../../tr",
            attrs=[Attribute(key='votes',
                            multi=True,
                            path={
                                'votes': "td[1]//text()",
                                'ordinal': "td[3]//text()"
                                })]),
        Extractor(label='mean and median',
            path="//p[starts-with(text(), 'Arithmetic mean')]",
            attrs=Attribute(key='mean and median',
                            path="text()")),
        Extractor(label='rating',
            path="//a[starts-with(@href, '/search/title?user_rating=')]",
            attrs=Attribute(key='rating',
                            path="text()")),
        Extractor(label='demographic voters',
            path="//td[b='Average']/../../tr",
            attrs=Attribute(key='demographic voters',
                            multi=True,
                            path={
                                'voters': "td[1]//text()",
                                'votes': "td[2]//text()",
                                'average': "td[3]//text()"
                                })),
        Extractor(label='top 250',
            path="//a[text()='top 250']",
            attrs=Attribute(key='top 250',
                            path="./preceding-sibling::text()[1]"))
        ]
    def postprocess_data(self, data):
        nd = {}
        votes = data.get('votes', [])
        if votes:
            # Rows 1..10 hold the per-score vote counts (row 0 is the
            # table header).
            nd['number of votes'] = {}
            for i in xrange(1, 11):
                _ordinal = int(votes[i]['ordinal'])
                _strvts = votes[i]['votes'] or '0'
                nd['number of votes'][_ordinal] = \
                        int(_strvts.replace(',', ''))
        mean = data.get('mean and median', '')
        if mean:
            means = self.re_means.findall(mean)
            if means and len(means[0]) == 2:
                am, med = means[0]
                try: am = float(am)
                except (ValueError, OverflowError): pass
                if type(am) is type(1.0):
                    nd['arithmetic mean'] = am
                try: med = int(med)
                except (ValueError, OverflowError): pass
                if type(med) is type(0):
                    nd['median'] = med
        if 'rating' in data:
            nd['rating'] = float(data['rating'])
        dem_voters = data.get('demographic voters')
        if dem_voters:
            # Row 0 is the header; each other row maps a demographic
            # group to (votes, average rating).
            nd['demographic'] = {}
            for i in xrange(1, len(dem_voters)):
                if (dem_voters[i]['votes'] is not None) \
                   and (dem_voters[i]['votes'].strip()):
                    nd['demographic'][dem_voters[i]['voters'].strip().lower()] \
                            = (int(dem_voters[i]['votes'].replace(',', '')),
                                    float(dem_voters[i]['average']))
        if 'imdb users' in nd.get('demographic', {}):
            nd['votes'] = nd['demographic']['imdb users'][0]
            nd['demographic']['all votes'] = nd['demographic']['imdb users']
            del nd['demographic']['imdb users']
        top250 = data.get('top 250')
        if top250:
            # The text looks like "Ranked #N in the "; skip the prefix
            # and keep the number.
            sd = top250[9:]
            i = sd.find(' ')
            if i != -1:
                sd = sd[:i]
            try: sd = int(sd)
            except (ValueError, OverflowError): pass
            if type(sd) is type(0):
                nd['top 250 rank'] = sd
        return nd
class DOMHTMLEpisodesRatings(DOMParserBase):
    """Parser for the "episode ratings ... by date" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        erparser = DOMHTMLEpisodesRatings()
        result = erparser.parse(eprating_html_string)
    """
    _containsObjects = True
    extractors = [Extractor(label='title', path="//title",
                            attrs=Attribute(key='title', path="./text()")),
                Extractor(label='ep ratings',
                        path="//th/../..//tr",
                        attrs=Attribute(key='episodes', multi=True,
                                path={'nr': ".//td[1]/text()",
                                    'ep title': ".//td[2]//text()",
                                    'movieID': ".//td[2]/a/@href",
                                    'rating': ".//td[3]/text()",
                                    'votes': ".//td[4]/text()"}))]
    def postprocess_data(self, data):
        if 'title' not in data or 'episodes' not in data: return {}
        nd = []
        title = data['title']
        for i in data['episodes']:
            ept = i['ep title']
            movieID = analyze_imdbid(i['movieID'])
            votes = i['votes']
            rating = i['rating']
            if not (ept and movieID and votes and rating): continue
            try:
                votes = int(votes.replace(',', '').replace('.', ''))
            except:
                pass
            try:
                rating = float(rating)
            except:
                pass
            ept = ept.strip()
            # Build a "Series Title {Episode Title (#nr)}" long title.
            ept = u'%s {%s' % (title, ept)
            nr = i['nr']
            if nr:
                ept += u' (#%s)' % nr.strip()
            ept += '}'
            if movieID is not None:
                movieID = str(movieID)
            m = Movie(title=ept, movieID=movieID, accessSystem=self._as,
                        modFunct=self._modFunct)
            epofdict = m.get('episode of')
            if epofdict is not None:
                m['episode of'] = Movie(data=epofdict, accessSystem=self._as,
                        modFunct=self._modFunct)
            nd.append({'episode': m, 'votes': votes, 'rating': rating})
        return {'episodes rating': nd}
def _normalize_href(href):
    """Make a site-relative link absolute by prefixing imdbURL_base;
    links that already start with 'http://' (and None) are returned
    untouched."""
    if href is None or href.lower().startswith('http://'):
        return href
    if href.startswith('/'):
        href = href[1:]
    return '%s%s' % (imdbURL_base, href)
class DOMHTMLOfficialsitesParser(DOMParserBase):
    """Parser for the "official sites", "external reviews", "newsgroup
    reviews", "miscellaneous links", "sound clips", "video clips" and
    "photographs" pages of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        osparser = DOMHTMLOfficialsitesParser()
        result = osparser.parse(officialsites_html_string)
    """
    # Subclasses override 'kind' to change the result key; the string
    # 'self.kind' below is resolved at parse time by the base class.
    kind = 'official sites'
    extractors = [
        Extractor(label='site',
            path="//ol/li/a",
            attrs=Attribute(key='self.kind',
                multi=True,
                path={
                    'link': "./@href",
                    'info': "./text()"
                },
                postprocess=lambda x: (x.get('info').strip(),
                            urllib.unquote(_normalize_href(x.get('link'))))))
        ]
class DOMHTMLConnectionParser(DOMParserBase):
    """Parser for the "connections" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        connparser = DOMHTMLConnectionParser()
        result = connparser.parse(connections_html_string)
    """
    _containsObjects = True
    extractors = [Extractor(label='connection',
                    group="//div[@class='_imdbpy']",
                    group_key="./h5/text()",
                    group_key_normalize=lambda x: x.lower(),
                    path="./a",
                    attrs=Attribute(key=None,
                                    path={'title': "./text()",
                                            'movieID': "./@href"},
                                    multi=True))]
    # Wrap each <h5> section in a div so groups can be selected.
    preprocessors = [
        ('<h5>', '</div><div class="_imdbpy"><h5>'),
        # To get the movie's year.
        ('</a> (', ' ('),
        ('\n<br/>', '</a>'),
        ('<br/> - ', '::')
        ]
    def postprocess_data(self, data):
        for key in data.keys():
            nl = []
            for v in data[key]:
                # 'title::notes' was produced by the '<br/> - ' fixup.
                title = v['title']
                ts = title.split('::', 1)
                title = ts[0].strip()
                notes = u''
                if len(ts) == 2:
                    notes = ts[1].strip()
                m = Movie(title=title,
                            movieID=analyze_imdbid(v['movieID']),
                            accessSystem=self._as, notes=notes,
                            modFunct=self._modFunct)
                nl.append(m)
            data[key] = nl
        if not data: return {}
        return {'connections': data}
class DOMHTMLLocationsParser(DOMParserBase):
    """Parser for the "locations" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        lparser = DOMHTMLLocationsParser()
        result = lparser.parse(locations_html_string)
    """
    # Each <dt> is a place; the following <dd> holds its note.
    extractors = [Extractor(label='locations', path="//dt",
                    attrs=Attribute(key='locations', multi=True,
                                path={'place': ".//text()",
                                        'note': "./following-sibling::dd[1]" \
                                                "//text()"},
                                postprocess=lambda x: (u'%s::%s' % (
                                    x['place'].strip(),
                                    (x['note'] or u'').strip())).strip(':')))]
class DOMHTMLTechParser(DOMParserBase):
    """Parser for the "technical", "business", "literature",
    "publicity" (for people) and "contacts (for people) pages of
    a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        tparser = HTMLTechParser()
        result = tparser.parse(technical_html_string)
    """
    # Overridden by callers: 'literature', 'business', 'contacts' and
    # 'publicity' change how postprocess_data shapes the result.
    kind = 'tech'
    extractors = [Extractor(label='tech',
                        group="//h5",
                        group_key="./text()",
                        group_key_normalize=lambda x: x.lower(),
                        path="./following-sibling::div[1]",
                        attrs=Attribute(key=None,
                                    path=".//text()",
                                    postprocess=lambda x: [t.strip()
                                        for t in x.split('\n') if t.strip()]))]
    preprocessors = [
        (re.compile('(<h5>.*?</h5>)', re.I), r'\1<div class="_imdbpy">'),
        (re.compile('((<br/>|</p>|</table>))\n?<br/>(?!<a)', re.I),
            r'\1</div>'),
        # the ones below are for the publicity parser
        (re.compile('<p>(.*?)</p>', re.I), r'\1<br/>'),
        (re.compile('(</td><td valign="top">)', re.I), r'\1::'),
        (re.compile('(</tr><tr>)', re.I), r'\n\1'),
        # this is for splitting individual entries
        (re.compile('<br/>', re.I), r'\n'),
        ]
    def postprocess_data(self, data):
        for key in data:
            data[key] = filter(None, data[key])
        if self.kind in ('literature', 'business', 'contacts') and data:
            # '/' in a key would be mistaken for a reference separator.
            if 'screenplay/teleplay' in data:
                data['screenplay-teleplay'] = data['screenplay/teleplay']
                del data['screenplay/teleplay']
            data = {self.kind: data}
        else:
            if self.kind == 'publicity':
                if 'biography (print)' in data:
                    data['biography-print'] = data['biography (print)']
                    del data['biography (print)']
            # Tech info.
            for key in data.keys():
                if key.startswith('film negative format'):
                    data['film negative format'] = data[key]
                    del data[key]
                elif key.startswith('film length'):
                    data['film length'] = data[key]
                    del data[key]
        return data
class DOMHTMLDvdParser(DOMParserBase):
"""Parser for the "dvd" page of a given movie.
The page should be provided as a string, as taken from
the akas.imdb.com server. The final result will be a
dictionary, with a key for every relevant section.
Example:
dparser = DOMHTMLDvdParser()
result = dparser.parse(dvd_html_string)
"""
_defGetRefs = True
extractors = [Extractor(label='dvd',
path="//div[@class='base_layer']",
attrs=[Attribute(key=None,
multi=True,
path={
'title': "../table[1]//h3/text()",
'cover': "../table[1]//img/@src",
'region': ".//p[b='Region:']/text()",
'asin': ".//p[b='ASIN:']/text()",
'upc': ".//p[b='UPC:']/text()",
'rating': ".//p/b[starts-with(text(), 'Rating:')]/../img/@alt",
'certificate': ".//p[b='Certificate:']/text()",
'runtime': ".//p[b='Runtime:']/text()",
'label': ".//p[b='Label:']/text()",
'studio': ".//p[b='Studio:']/text()",
'release date': ".//p[b='Release Date:']/text()",
'dvd format': ".//p[b='DVD Format:']/text()",
'dvd features': ".//p[b='DVD Features: ']//text()",
'supplements': "..//div[span='Supplements']" \
"/following-sibling::div[1]//text()",
'review': "..//div[span='Review']/following-sibling::div[1]//text()",
'titles': "..//div[starts-with(text(), 'Titles in this Product')]" \
"/..//text()",
},
postprocess=lambda x: {
'title': (x.get('title') or u'').strip(),
'cover': (x.get('cover') or u'').strip(),
'region': (x.get('region') or u'').strip(),
'asin': (x.get('asin') or u'').strip(),
'upc': (x.get('upc') or u'').strip(),
'rating': (x.get('rating') or u'Not Rated').strip().replace('Rating: ', ''),
'certificate': (x.get('certificate') or u'').strip(),
'runtime': (x.get('runtime') or u'').strip(),
'label': (x.get('label') or u'').strip(),
'studio': (x.get('studio') or u'').strip(),
'release date': (x.get('release date') or u'').strip(),
'dvd format': (x.get('dvd format') or u'').strip(),
'dvd features': (x.get('dvd features') or u'').strip().replace('DVD Features: ', ''),
'supplements': (x.get('supplements') or u'').strip(),
'review': (x.get('review') or u'').strip(),
'titles in this product': (x.get('titles') or u'').strip().replace('Titles in this Product::', ''),
}
)])]
preprocessors = [
(re.compile('<p>(<table class="dvd_section" .*)</p>\s*<hr\s*/>', re.I),
r'<div class="_imdbpy">\1</div>'),
(re.compile('<p>(<div class\s*=\s*"base_layer")', re.I), r'\1'),
(re.compile('</p>\s*<p>(<div class="dvd_section")', re.I), r'\1'),
(re.compile('</div><div class="dvd_row(_alt)?">', re.I), r'::')
]
    def postprocess_data(self, data):
        """Clean up every parsed dvd entry: drop placeholder covers,
        remove empty values and split the 'supplements' text on '::'."""
        if not data:
            return data
        dvds = data['dvd']
        for dvd in dvds:
            # A 'noposter' image URL is IMDb's stand-in for missing cover art.
            if dvd['cover'].find('noposter') != -1:
                del dvd['cover']
            # NOTE: deleting while iterating .keys() relies on Python 2,
            # where keys() returns a list snapshot.
            for key in dvd.keys():
                if not dvd[key]:
                    del dvd[key]
            if 'supplements' in dvd:
                dvd['supplements'] = dvd['supplements'].split('::')
        return data
class DOMHTMLRecParser(DOMParserBase):
    """Parser for the "recommendations" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        rparser = HTMLRecParser()
        result = rparser.parse(recommendations_html_string)
    """
    _containsObjects = True

    extractors = [Extractor(label='recommendations',
                            path="//td[@valign='middle'][1]",
                            attrs=Attribute(key='../../tr/td[1]//text()',
                                            multi=True,
                                            path={'title': ".//text()",
                                                  'movieID': ".//a/@href"}))]

    def postprocess_data(self, data):
        # Rename the page's section headings to short keys and turn the raw
        # title/movieID pairs into Movie objects.
        # NOTE: deleting while iterating .keys() relies on Python 2 semantics.
        for key in data.keys():
            n_key = key
            n_keyl = n_key.lower()
            if n_keyl == 'suggested by the database':
                n_key = 'database'
            elif n_keyl == 'imdb users recommend':
                n_key = 'users'
            data[n_key] = [Movie(title=x['title'],
                                 movieID=analyze_imdbid(x['movieID']),
                                 accessSystem=self._as,
                                 modFunct=self._modFunct)
                           for x in data[key]]
            del data[key]
        if data: return {'recommendations': data}
        return data
class DOMHTMLNewsParser(DOMParserBase):
    """Parser for the "news" page of a given movie or person.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        nwparser = DOMHTMLNewsParser()
        result = nwparser.parse(news_html_string)
    """
    _defGetRefs = True

    extractors = [
        Extractor(label='news',
            path="//h2",
            attrs=Attribute(key='news',
                multi=True,
                path={
                    'title': "./text()",
                    'fromdate': "../following-sibling::p[1]/small//text()",
                    # FIXME: sometimes (see The Matrix (1999)) <p> is found
                    # inside news text.
                    'body': "../following-sibling::p[2]//text()",
                    'link': "../..//a[text()='Permalink']/@href",
                    'fulllink': "../..//a[starts-with(text(), " \
                            "'See full article at')]/@href"
                    },
                # 'fromdate' looks like "date | From source" and is split
                # on the '|' separator.
                postprocess=lambda x: {
                    'title': x.get('title').strip(),
                    'date': x.get('fromdate').split('|')[0].strip(),
                    'from': x.get('fromdate').split('|')[1].replace('From ',
                            '').strip(),
                    'body': (x.get('body') or u'').strip(),
                    'link': _normalize_href(x.get('link')),
                    'full article link': _normalize_href(x.get('fulllink'))
                    }))
        ]

    preprocessors = [
        (re.compile('(<a name=[^>]+><h2>)', re.I), r'<div class="_imdbpy">\1'),
        (re.compile('(<hr/>)', re.I), r'</div>\1'),
        (re.compile('<p></p>', re.I), r'')
        ]

    def postprocess_data(self, data):
        # No news items parsed: return an empty result.
        if not data.has_key('news'):
            return {}
        for news in data['news']:
            # Remove the key when no "full article" link was found.
            if news.has_key('full article link'):
                if news['full article link'] is None:
                    del news['full article link']
        return data
def _parse_review(x):
    """Normalize one extracted Amazon review into a plain dictionary.

    Strips the trailing colon from the title and kind, rewrites
    paragraph breaks, and detaches the trailing '--author' signature
    and the leading reviewed-item name when present.
    """
    heading = x.get('title').strip()
    if heading[-1] == ':':
        heading = heading[:-1]
    category = x.get('kind').strip()
    if category[-1] == ':':
        category = category[:-1]
    parsed = {
        'title': heading,
        'link': _normalize_href(x.get('link')),
        'review kind': category,
    }
    # Paragraph breaks ('\n\n') become real newlines; single line breaks
    # collapse into spaces.
    flattened = x.get('review').replace('\n\n', '||').replace('\n', ' ')
    body = '\n'.join(flattened.split('||'))
    signature = x.get('author')
    if signature is not None:
        signature = signature.strip()
        # The body text ends with the signature; keep only what precedes it.
        body = body.split(signature)[0].strip()
        # Drop the leading '--' marker from the author's name.
        parsed['review author'] = signature[2:]
    reviewed_item = x.get('item')
    if reviewed_item is not None:
        reviewed_item = reviewed_item.strip()
        remainder = body[len(reviewed_item):].strip()
        body = "%s: %s" % (reviewed_item, remainder)
    parsed['review'] = body
    return parsed
class DOMHTMLAmazonReviewsParser(DOMParserBase):
    """Parser for the "amazon reviews" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        arparser = DOMHTMLAmazonReviewsParser()
        result = arparser.parse(amazonreviews_html_string)
    """
    extractors = [
        Extractor(label='amazon reviews',
            group="//h3",
            # The heading text ends with a colon, removed by the normalizer.
            group_key="./a/text()",
            group_key_normalize=lambda x: x[:-1],
            path="./following-sibling::p[1]/span[@class='_review']",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'title': "../preceding-sibling::h3[1]/a[1]/text()",
                    'link': "../preceding-sibling::h3[1]/a[1]/@href",
                    'kind': "./preceding-sibling::b[1]/text()",
                    'item': "./i/b/text()",
                    'review': ".//text()",
                    'author': "./i[starts-with(text(), '--')]/text()"
                    },
                postprocess=_parse_review))
        ]

    # Wrap each review's text in <span class="_review"> markers so the
    # extractor paths above can address it.
    preprocessors = [
        (re.compile('<p>\n(?!<b>)', re.I), r'\n'),
        (re.compile('(\n</b>\n)', re.I), r'\1<span class="_review">'),
        (re.compile('(</p>\n\n)', re.I), r'</span>\1'),
        (re.compile('(\s\n)(<i><b>)', re.I), r'</span>\1<span class="_review">\2')
        ]

    def postprocess_data(self, data):
        if len(data) == 0:
            return {}
        # Flatten the per-section groups into a single list of reviews.
        nd = []
        for item in data.keys():
            nd = nd + data[item]
        return {'amazon reviews': nd}
def _parse_merchandising_link(x):
    """Turn one extracted merchandising entry into a plain dictionary.

    Optional fields (alt text, cover image, description) appear only
    when present; a known shop name is prepended to the description
    using the '::' separator.
    """
    info = {'link': _normalize_href(x.get('link'))}
    alt_text = x.get('text')
    if alt_text is not None:
        info['link-text'] = alt_text.strip()
    cover_url = x.get('cover')
    if cover_url is not None:
        info['cover'] = cover_url
    descr = x.get('description')
    if descr is not None:
        shop_name = x.get('shop')
        if shop_name is None:
            info['description'] = descr.strip()
        else:
            info['description'] = u'%s::%s' % (shop_name, descr.strip())
    return info
class DOMHTMLSalesParser(DOMParserBase):
    """Parser for the "merchandising links" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        sparser = DOMHTMLSalesParser()
        result = sparser.parse(sales_html_string)
    """
    extractors = [
        # Entries laid out as shop tables (one row per product).
        Extractor(label='shops',
            group="//h5/a[@name]/..",
            group_key="./a[1]/text()",
            group_key_normalize=lambda x: x.lower(),
            path=".//following-sibling::table[1]/" \
                    "/td[@class='w_rowtable_colshop']//tr[1]",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'link': "./td[2]/a[1]/@href",
                    'text': "./td[1]/img[1]/@alt",
                    'cover': "./ancestor::td[1]/../td[1]"\
                            "/a[1]/img[1]/@src",
                    },
                postprocess=_parse_merchandising_link)),
        # Plain-text entries wrapped in the '_info' spans added below.
        Extractor(label='others',
            group="//span[@class='_info']/..",
            group_key="./h5/a[1]/text()",
            group_key_normalize=lambda x: x.lower(),
            path="./span[@class='_info']",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'link': "./preceding-sibling::a[1]/@href",
                    'shop': "./preceding-sibling::a[1]/text()",
                    'description': ".//text()",
                    },
                postprocess=_parse_merchandising_link))
        ]

    preprocessors = [
        (re.compile('(<h5><a name=)', re.I), r'</div><div class="_imdbpy">\1'),
        (re.compile('(</h5>\n<br/>\n)</div>', re.I), r'\1'),
        (re.compile('(<br/><br/>\n)(\n)', re.I), r'\1</div>\2'),
        (re.compile('(\n)(Search.*?)(</a>)(\n)', re.I), r'\3\1\2\4'),
        (re.compile('(\n)(Search.*?)(\n)', re.I),
         r'\1<span class="_info">\2</span>\3')
        ]

    def postprocess_data(self, data):
        if len(data) == 0:
            return {}
        return {'merchandising links': data}
def _build_episode(x):
    """Create a Movie object for a given series' episode.

    *x* is the dictionary extracted from one episode row of the
    "episode list" page.
    """
    episode_id = analyze_imdbid(x.get('link'))
    episode_title = x.get('title')
    e = Movie(movieID=episode_id, title=episode_title)
    e['kind'] = u'episode'
    oad = x.get('oad')
    if oad:
        e['original air date'] = oad.strip()
    year = x.get('year')
    if year is not None:
        # 'year' comes from an anchor name; the [5:] slice drops its fixed
        # prefix — presumably 'year-'; confirm against the page markup.
        year = year[5:]
        if year == 'unknown': year = u'????'
        if year and year.isdigit():
            year = int(year)
        e['year'] = year
    else:
        # Fall back on the last four digits of the air date, if any.
        if oad and oad[-4:].isdigit():
            e['year'] = int(oad[-4:])
    epinfo = x.get('episode')
    if epinfo is not None:
        # Expects text shaped like "Season N, Episode M: ..."; the fixed
        # offsets skip the 'Season ' and ' Episode ' prefixes.
        season, episode = epinfo.split(':')[0].split(',')
        e['season'] = int(season[7:])
        e['episode'] = int(episode[8:])
    else:
        e['season'] = 'unknown'
        e['episode'] = 'unknown'
    plot = x.get('plot')
    if plot:
        e['plot'] = plot.strip()
    return e
class DOMHTMLEpisodesParser(DOMParserBase):
    """Parser for the "episode list" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        eparser = DOMHTMLEpisodesParser()
        result = eparser.parse(episodes_html_string)
    """
    _containsObjects = True

    # Subclasses override these to parse the "episodes cast" variant.
    kind = 'episodes list'
    _episodes_path = "..//h4"
    _oad_path = "./following-sibling::span/strong[1]/text()"

    def _init(self):
        # Extractors are built at init time so subclass overrides of
        # kind/_episodes_path/_oad_path take effect.
        self.extractors = [
            Extractor(label='series',
                path="//html",
                attrs=[Attribute(key='series title',
                        path=".//title/text()"),
                    Attribute(key='series movieID',
                        path=".//h1/a[@class='main']/@href",
                        postprocess=analyze_imdbid)
                    ]),
            Extractor(label='episodes',
                group="//div[@class='_imdbpy']/h3",
                group_key="./a/@name",
                path=self._episodes_path,
                attrs=Attribute(key=None,
                    multi=True,
                    path={
                        'link': "./a/@href",
                        'title': "./a/text()",
                        'year': "./preceding-sibling::a[1]/@name",
                        'episode': "./text()[1]",
                        'oad': self._oad_path,
                        'plot': "./following-sibling::text()[1]"
                        },
                    postprocess=_build_episode))]
        if self.kind == 'episodes cast':
            self.extractors += [
                Extractor(label='cast',
                    group="//h4",
                    group_key="./text()[1]",
                    group_key_normalize=lambda x: x.strip(),
                    path="./following-sibling::table[1]//td[@class='nm']",
                    attrs=Attribute(key=None,
                        multi=True,
                        path={'person': "..//text()",
                            'link': "./a/@href",
                            'roleID': \
                                "../td[4]/div[@class='_imdbpyrole']/@roleid"},
                        postprocess=lambda x: \
                            build_person(x.get('person') or u'',
                                personID=analyze_imdbid(x.get('link')),
                                roleID=(x.get('roleID') or u'').split('/'),
                                accessSystem=self._as,
                                modFunct=self._modFunct)))
                ]

    preprocessors = [
        (re.compile('(<hr/>\n)(<h3>)', re.I),
         r'</div>\1<div class="_imdbpy">\2'),
        (re.compile('(</p>\n\n)</div>', re.I), r'\1'),
        (re.compile('<h3>(.*?)</h3>', re.I), r'<h4>\1</h4>'),
        (_reRolesMovie, _manageRoles),
        (re.compile('(<br/> <br/>\n)(<hr/>)', re.I), r'\1</div>\2')
        ]

    def postprocess_data(self, data):
        """Group episodes by season and attach the series and the cast."""
        # A bit extreme?
        if not 'series title' in data: return {}
        if not 'series movieID' in data: return {}
        # Strip the page-title suffixes to recover the series title.
        stitle = data['series title'].replace('- Episode list', '')
        stitle = stitle.replace('- Episodes list', '')
        stitle = stitle.replace('- Episode cast', '')
        stitle = stitle.replace('- Episodes cast', '')
        stitle = stitle.strip()
        if not stitle: return {}
        seriesID = data['series movieID']
        if seriesID is None: return {}
        series = Movie(title=stitle, movieID=str(seriesID),
                       accessSystem=self._as, modFunct=self._modFunct)
        nd = {}
        # NOTE: iterating .keys() relies on Python 2's list snapshot.
        for key in data.keys():
            if key.startswith('season-'):
                season_key = key[7:]
                try: season_key = int(season_key)
                except: pass
                nd[season_key] = {}
                ep_counter = 1
                for episode in data[key]:
                    if not episode: continue
                    episode_key = episode.get('episode')
                    if episode_key is None: continue
                    # Unnumbered episodes get sequential synthetic numbers.
                    if not isinstance(episode_key, int):
                        episode_key = ep_counter
                        ep_counter += 1
                    cast_key = 'Season %s, Episode %s:' % (season_key,
                                                           episode_key)
                    if data.has_key(cast_key):
                        cast = data[cast_key]
                        # Billing position follows the page order.
                        for i in xrange(len(cast)):
                            cast[i].billingPos = i + 1
                        episode['cast'] = cast
                    episode['episode of'] = series
                    nd[season_key][episode_key] = episode
        if len(nd) == 0:
            return {}
        return {'episodes': nd}
class DOMHTMLEpisodesCastParser(DOMHTMLEpisodesParser):
    """Parser for the "episodes cast" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        eparser = DOMHTMLEpisodesParser()
        result = eparser.parse(episodes_html_string)
    """
    # Overriding 'kind' switches the base class's _init() into cast mode;
    # the paths below match this page's slightly different markup.
    kind = 'episodes cast'
    _episodes_path = "..//h4"
    _oad_path = "./following-sibling::b[1]/text()"
class DOMHTMLFaqsParser(DOMParserBase):
    """Parser for the "FAQ" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        fparser = DOMHTMLFaqsParser()
        result = fparser.parse(faqs_html_string)
    """
    _defGetRefs = True

    # XXX: bsoup and lxml don't match (looks like a minor issue, anyway).

    extractors = [
        Extractor(label='faqs',
            path="//div[@class='section']",
            attrs=Attribute(key='faqs',
                multi=True,
                path={
                    'question': "./h3/a/span/text()",
                    'answer': "../following-sibling::div[1]//text()"
                    },
                # Joined as 'question::answer'; the '||' markers inserted
                # by the preprocessors become paragraph breaks.
                postprocess=lambda x: u'%s::%s' % (x.get('question').strip(),
                    '\n\n'.join(x.get('answer').replace(
                        '\n\n', '\n').strip().split('||')))))
        ]

    preprocessors = [
        (re.compile('<br/><br/>', re.I), r'||'),
        (re.compile('<h4>(.*?)</h4>\n', re.I), r'||\1--'),
        (re.compile('<span class="spoiler"><span>(.*?)</span></span>', re.I),
         r'[spoiler]\1[/spoiler]')
        ]
class DOMHTMLAiringParser(DOMParserBase):
    """Parser for the "airing" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        aparser = DOMHTMLAiringParser()
        result = aparser.parse(airing_html_string)
    """
    _containsObjects = True

    extractors = [
        Extractor(label='series title',
            path="//title",
            attrs=Attribute(key='series title', path="./text()",
                postprocess=lambda x: \
                    x.replace(' - TV schedule', u''))),
        Extractor(label='series id',
            path="//h1/a[@href]",
            attrs=Attribute(key='series id', path="./@href")),
        Extractor(label='tv airings',
            path="//tr[@class]",
            attrs=Attribute(key='airing',
                multi=True,
                path={
                    'date': "./td[1]//text()",
                    'time': "./td[2]//text()",
                    'channel': "./td[3]//text()",
                    'link': "./td[4]/a[1]/@href",
                    'title': "./td[4]//text()",
                    'season': "./td[5]//text()",
                    },
                postprocess=lambda x: {
                    'date': x.get('date'),
                    'time': x.get('time'),
                    'channel': x.get('channel').strip(),
                    'link': x.get('link'),
                    'title': x.get('title'),
                    'season': (x.get('season') or '').strip()
                    }
                ))
        ]

    def postprocess_data(self, data):
        """Replace each airing's link/title pair with a Movie object for
        the episode (or for the series itself when no title is given)."""
        if len(data) == 0:
            return {}
        seriesTitle = data['series title']
        seriesID = analyze_imdbid(data['series id'])
        if data.has_key('airing'):
            for airing in data['airing']:
                title = airing.get('title', '').strip()
                if not title:
                    # No episode title: the airing refers to the series.
                    epsTitle = seriesTitle
                    if seriesID is None:
                        continue
                    epsID = seriesID
                else:
                    epsTitle = '%s {%s}' % (data['series title'],
                                            airing['title'])
                    epsID = analyze_imdbid(airing['link'])
                e = Movie(title=epsTitle, movieID=epsID)
                airing['episode'] = e
                del airing['link']
                del airing['title']
                if not airing['season']:
                    del airing['season']
        if 'series title' in data:
            del data['series title']
        if 'series id' in data:
            del data['series id']
        if 'airing' in data:
            # Drop entries emptied by the loop above (Python 2 filter
            # returns a list).
            data['airing'] = filter(None, data['airing'])
        if 'airing' not in data or not data['airing']:
            return {}
        return data
class DOMHTMLSynopsisParser(DOMParserBase):
    """Parser for the "synopsis" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        sparser = HTMLSynopsisParser()
        result = sparser.parse(synopsis_html_string)
    """
    extractors = [
        Extractor(label='synopsis',
            path="//div[@class='display'][not(@style)]",
            attrs=Attribute(key='synopsis',
                path=".//text()",
                # '||' markers (from the preprocessor below) become
                # paragraph breaks.
                postprocess=lambda x: '\n\n'.join(x.strip().split('||'))))
        ]

    preprocessors = [
        (re.compile('<br/><br/>', re.I), r'||')
        ]
class DOMHTMLParentsGuideParser(DOMParserBase):
    """Parser for the "parents guide" page of a given movie.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        pgparser = HTMLParentsGuideParser()
        result = pgparser.parse(parentsguide_html_string)
    """
    extractors = [
        Extractor(label='parents guide',
            group="//div[@class='section']",
            group_key="./h3/a/span/text()",
            group_key_normalize=lambda x: x.lower(),
            path="../following-sibling::div[1]/p",
            attrs=Attribute(key=None,
                path=".//text()",
                # Split on the '||' paragraph markers, dropping blanks.
                postprocess=lambda x: [t.strip().replace('\n', ' ')
                    for t in x.split('||') if t.strip()]))
        ]

    preprocessors = [
        (re.compile('<br/><br/>', re.I), r'||')
        ]

    def postprocess_data(self, data):
        # Keep only the sections that actually have content.
        data2 = {}
        for key in data:
            if data[key]:
                data2[key] = data[key]
        if not data2:
            return {}
        return {'parents guide': data2}
# Map of parser-instance names to (parser classes, keyword arguments used
# to instantiate them); consumed by the http data access system.
_OBJECTS = {
    'movie_parser': ((DOMHTMLMovieParser,), None),
    'plot_parser': ((DOMHTMLPlotParser,), None),
    'movie_awards_parser': ((DOMHTMLAwardsParser,), None),
    'taglines_parser': ((DOMHTMLTaglinesParser,), None),
    'keywords_parser': ((DOMHTMLKeywordsParser,), None),
    'crazycredits_parser': ((DOMHTMLCrazyCreditsParser,), None),
    'goofs_parser': ((DOMHTMLGoofsParser,), None),
    'alternateversions_parser': ((DOMHTMLAlternateVersionsParser,), None),
    'trivia_parser': ((DOMHTMLTriviaParser,), None),
    'soundtrack_parser': ((DOMHTMLSoundtrackParser,), {'kind': 'soundtrack'}),
    'quotes_parser': ((DOMHTMLQuotesParser,), None),
    'releasedates_parser': ((DOMHTMLReleaseinfoParser,), None),
    'ratings_parser': ((DOMHTMLRatingsParser,), None),
    'officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
    'externalrev_parser': ((DOMHTMLOfficialsitesParser,),
                            {'kind': 'external reviews'}),
    'newsgrouprev_parser': ((DOMHTMLOfficialsitesParser,),
                            {'kind': 'newsgroup reviews'}),
    'misclinks_parser': ((DOMHTMLOfficialsitesParser,),
                            {'kind': 'misc links'}),
    'soundclips_parser': ((DOMHTMLOfficialsitesParser,),
                            {'kind': 'sound clips'}),
    'videoclips_parser': ((DOMHTMLOfficialsitesParser,),
                            {'kind': 'video clips'}),
    'photosites_parser': ((DOMHTMLOfficialsitesParser,),
                            {'kind': 'photo sites'}),
    'connections_parser': ((DOMHTMLConnectionParser,), None),
    'tech_parser': ((DOMHTMLTechParser,), None),
    'business_parser': ((DOMHTMLTechParser,),
                            {'kind': 'business', '_defGetRefs': 1}),
    'literature_parser': ((DOMHTMLTechParser,), {'kind': 'literature'}),
    'locations_parser': ((DOMHTMLLocationsParser,), None),
    'dvd_parser': ((DOMHTMLDvdParser,), None),
    'rec_parser': ((DOMHTMLRecParser,), None),
    'news_parser': ((DOMHTMLNewsParser,), None),
    'amazonrev_parser': ((DOMHTMLAmazonReviewsParser,), None),
    'sales_parser': ((DOMHTMLSalesParser,), None),
    'episodes_parser': ((DOMHTMLEpisodesParser,), None),
    'episodes_cast_parser': ((DOMHTMLEpisodesCastParser,), None),
    'eprating_parser': ((DOMHTMLEpisodesRatings,), None),
    'movie_faqs_parser': ((DOMHTMLFaqsParser,), None),
    'airing_parser': ((DOMHTMLAiringParser,), None),
    'synopsis_parser': ((DOMHTMLSynopsisParser,), None),
    'parentsguide_parser': ((DOMHTMLParentsGuideParser,), None)
}
| [
[
8,
0,
0.0074,
0.0142,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0153,
0.0005,
0,
0.66,
0.0196,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0158,
0.0005,
0,
0.66... | [
"\"\"\"\nparser.http.movieParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse the\nIMDb pages on the akas.imdb.com server about a movie.\nE.g., for Brian De Palma's \"The Untouchables\", the referred\npages would be:\n combined details: http://akas.imdb.com/titl... |
"""
parser.http.personParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a person.
E.g., for "Mel Gibson" the referred pages would be:
categorized: http://akas.imdb.com/name/nm0000154/maindetails
biography: http://akas.imdb.com/name/nm0000154/bio
...and so on...
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.Movie import Movie
from imdb.utils import analyze_name, canonicalName, normalizeName, \
analyze_title, date_and_notes
from utils import build_movie, DOMParserBase, Attribute, Extractor, \
analyze_imdbid
from movieParser import _manageRoles
# Matches one "<li>... .... role</li>" filmography entry, capturing the
# prefix up to the dots, the role text, and the closing tag.
_reRoles = re.compile(r'(<li>.*? \.\.\.\. )(.*?)(</li>|<br>)',
                      re.I | re.M | re.S)
def build_date(date):
    """Join the 'day' and 'year' parts of an extracted date.

    Returns "day year" when both are present, the single available
    part otherwise, and an empty string when neither is set.
    """
    day_part = date.get('day')
    year_part = date.get('year')
    if day_part and year_part:
        return "%s %s" % (day_part, year_part)
    # Fall back to whichever single component is truthy.
    return day_part or year_part or ""
class DOMHTMLMaindetailsParser(DOMParserBase):
    """Parser for the "categorized" (maindetails) page of a given person.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        cparser = DOMHTMLMaindetailsParser()
        result = cparser.parse(categorized_html_string)
    """
    _containsObjects = True

    # Attribute sets shared by the extractors below.
    _birth_attrs = [Attribute(key='birth date',
                        path={
                            'day': ".//a[starts-with(@href, " \
                                    "'/date/')]/text()",
                            'year': ".//a[starts-with(@href, " \
                                    "'/search/name?birth_year=')]/text()"
                            },
                        postprocess=build_date),
                    Attribute(key='birth place',
                        path=".//a[starts-with(@href, " \
                                "'/search/name?birth_place=')]/text()")]
    _death_attrs = [Attribute(key='death date',
                        path={
                            'day': ".//a[starts-with(@href, " \
                                    "'/date/')]/text()",
                            'year': ".//a[starts-with(@href, " \
                                    "'/search/name?death_year=')]/text()"
                            },
                        postprocess=build_date),
                    Attribute(key='death place',
                        path=".//a[starts-with(@href, " \
                                "'/search/name?death_place=')]/text()")]
    _film_attrs = [Attribute(key=None,
                        multi=True,
                        path={
                            'link': "./b/a[1]/@href",
                            'title': "./b/a[1]/text()",
                            'notes': "./b/following-sibling::text()",
                            'year': "./span[@class='year_column']/text()",
                            'status': "./a[@class='in_production']/text()",
                            'rolesNoChar': './/br/following-sibling::text()',
                            # @imdbpyname is injected by the preprocessor below.
                            'chrRoles': "./a[@imdbpyname]/@imdbpyname",
                            'roleID': "./a[starts-with(@href, '/character/')]/@href"
                            },
                        postprocess=lambda x:
                            build_movie(x.get('title') or u'',
                                year=x.get('year'),
                                movieID=analyze_imdbid(x.get('link') or u''),
                                rolesNoChar=(x.get('rolesNoChar') or u'').strip(),
                                chrRoles=(x.get('chrRoles') or u'').strip(),
                                additionalNotes=x.get('notes'),
                                roleID=(x.get('roleID') or u''),
                                status=x.get('status') or None))]

    extractors = [
        Extractor(label='name',
            path="//h1[@class='header']",
            attrs=Attribute(key='name',
                path=".//text()",
                postprocess=lambda x: analyze_name(x,
                    canonical=1))),
        Extractor(label='birth info',
            path="//div[h4='Born:']",
            attrs=_birth_attrs),
        Extractor(label='death info',
            path="//div[h4='Died:']",
            attrs=_death_attrs),
        Extractor(label='headshot',
            path="//td[@id='img_primary']/a",
            attrs=Attribute(key='headshot',
                path="./img/@src")),
        Extractor(label='akas',
            path="//div[h4='Alternate Names:']",
            attrs=Attribute(key='akas',
                path="./text()",
                postprocess=lambda x: x.strip().split(' '))),
        Extractor(label='filmography',
            group="//div[starts-with(@id, 'filmo-head-')]",
            group_key="./a[@name]/text()",
            group_key_normalize=lambda x: x.lower().replace(': ', ' '),
            path="./following-sibling::div[1]" \
                    "/div[starts-with(@class, 'filmo-row')]",
            attrs=_film_attrs),
        Extractor(label='indevelopment',
            path="//div[starts-with(@class,'devitem')]",
            attrs=Attribute(key='in development',
                multi=True,
                path={
                    'link': './a/@href',
                    'title': './a/text()'
                    },
                postprocess=lambda x:
                    build_movie(x.get('title') or u'',
                        movieID=analyze_imdbid(x.get('link') or u''),
                        roleID=(x.get('roleID') or u'').split('/'),
                        status=x.get('status') or None)))
        ]

    # The last rule tags character links with an @imdbpyname attribute so
    # the filmography extractor can read the character name.
    preprocessors = [('<div class="clear"/> </div>', ''),
            ('<br/>', '<br />'),
            (re.compile(r'(<a href="/character/ch[0-9]{7}")>(.*?)</a>'),
             r'\1 imdbpyname="\2@@">\2</a>')]

    def postprocess_data(self, data):
        for what in 'birth date', 'death date':
            if what in data and not data[what]:
                del data[what]
        # XXX: the code below is for backwards compatibility
        # probably could be removed
        for key in data.keys():
            if key.startswith('actor '):
                if not data.has_key('actor'):
                    data['actor'] = []
                data['actor'].extend(data[key])
                del data[key]
            if key.startswith('actress '):
                if not data.has_key('actress'):
                    data['actress'] = []
                data['actress'].extend(data[key])
                del data[key]
            if key.startswith('self '):
                if not data.has_key('self'):
                    data['self'] = []
                data['self'].extend(data[key])
                del data[key]
            if key == 'birth place':
                data['birth notes'] = data[key]
                del data[key]
            if key == 'death place':
                data['death notes'] = data[key]
                del data[key]
        return data
class DOMHTMLBioParser(DOMParserBase):
    """Parser for the "biography" page of a given person.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        bioparser = DOMHTMLBioParser()
        result = bioparser.parse(biography_html_string)
    """
    _defGetRefs = True

    _birth_attrs = [Attribute(key='birth date',
                        path={
                            'day': "./a[starts-with(@href, " \
                                    "'/date/')]/text()",
                            'year': "./a[starts-with(@href, " \
                                    "'/search/name?birth_year=')]/text()"
                            },
                        postprocess=build_date),
                    Attribute(key='birth notes',
                        path="./a[starts-with(@href, " \
                                "'/search/name?birth_place=')]/text()")]
    _death_attrs = [Attribute(key='death date',
                        path={
                            'day': "./a[starts-with(@href, " \
                                    "'/date/')]/text()",
                            'year': "./a[starts-with(@href, " \
                                    "'/search/name?death_date=')]/text()"
                            },
                        postprocess=build_date),
                    Attribute(key='death notes',
                        path="./text()",
                        # TODO: check if this slicing is always correct
                        postprocess=lambda x: u''.join(x).strip()[2:])]
    extractors = [
        Extractor(label='headshot',
            path="//a[@name='headshot']",
            attrs=Attribute(key='headshot',
                path="./img/@src")),
        Extractor(label='birth info',
            path="//div[h5='Date of Birth']",
            attrs=_birth_attrs),
        Extractor(label='death info',
            path="//div[h5='Date of Death']",
            attrs=_death_attrs),
        Extractor(label='nick names',
            path="//div[h5='Nickname']",
            attrs=Attribute(key='nick names',
                path="./text()",
                joiner='|',
                postprocess=lambda x: [n.strip().replace(' (',
                        '::(', 1) for n in x.split('|')
                        if n.strip()])),
        Extractor(label='birth name',
            path="//div[h5='Birth Name']",
            attrs=Attribute(key='birth name',
                path="./text()",
                postprocess=lambda x: canonicalName(x.strip()))),
        Extractor(label='height',
            path="//div[h5='Height']",
            attrs=Attribute(key='height',
                path="./text()",
                postprocess=lambda x: x.strip())),
        Extractor(label='mini biography',
            path="//div[h5='Mini Biography']",
            attrs=Attribute(key='mini biography',
                multi=True,
                path={
                    'bio': "./p//text()",
                    'by': "./b/following-sibling::a/text()"
                    },
                # Stored as 'bio::author' (defaulting to Anonymous).
                postprocess=lambda x: "%s::%s" % \
                    (x.get('bio').strip(),
                    (x.get('by') or u'').strip() or u'Anonymous'))),
        Extractor(label='spouse',
            path="//div[h5='Spouse']/table/tr",
            attrs=Attribute(key='spouse',
                multi=True,
                path={
                    'name': "./td[1]//text()",
                    'info': "./td[2]//text()"
                    },
                postprocess=lambda x: ("%s::%s" % \
                    (x.get('name').strip(),
                    (x.get('info') or u'').strip())).strip(':'))),
        Extractor(label='trade mark',
            path="//div[h5='Trade Mark']/p",
            attrs=Attribute(key='trade mark',
                multi=True,
                path=".//text()",
                postprocess=lambda x: x.strip())),
        Extractor(label='trivia',
            path="//div[h5='Trivia']/p",
            attrs=Attribute(key='trivia',
                multi=True,
                path=".//text()",
                postprocess=lambda x: x.strip())),
        Extractor(label='quotes',
            path="//div[h5='Personal Quotes']/p",
            attrs=Attribute(key='quotes',
                multi=True,
                path=".//text()",
                postprocess=lambda x: x.strip())),
        Extractor(label='salary',
            path="//div[h5='Salary']/table/tr",
            attrs=Attribute(key='salary history',
                multi=True,
                path={
                    'title': "./td[1]//text()",
                    'info': "./td[2]/text()",
                    },
                postprocess=lambda x: "%s::%s" % \
                    (x.get('title').strip(),
                    x.get('info').strip()))),
        Extractor(label='where now',
            path="//div[h5='Where Are They Now']/p",
            attrs=Attribute(key='where now',
                multi=True,
                path=".//text()",
                postprocess=lambda x: x.strip())),
        ]

    preprocessors = [
        (re.compile('(<h5>)', re.I), r'</div><div class="_imdbpy">\1'),
        (re.compile('(</table>\n</div>\s+)</div>', re.I + re.DOTALL), r'\1'),
        (re.compile('(<div id="tn15bot">)'), r'</div>\1'),
        (re.compile('\.<br><br>([^\s])', re.I), r'. \1')
        ]

    def postprocess_data(self, data):
        # Remove empty birth/death dates left by the extractors.
        for what in 'birth date', 'death date':
            if what in data and not data[what]:
                del data[what]
        return data
class DOMHTMLOtherWorksParser(DOMParserBase):
    """Parser for the "other works" and "agent" pages of a given person.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        owparser = DOMHTMLOtherWorksParser()
        result = owparser.parse(otherworks_html_string)
    """
    _defGetRefs = True
    # Name of the result key; 'self.kind' below is resolved at parse time.
    kind = 'other works'

    # XXX: looks like the 'agent' page is no more public.

    extractors = [
        Extractor(label='other works',
            path="//h5[text()='Other works']/" \
                    "following-sibling::div[1]",
            attrs=Attribute(key='self.kind',
                path=".//text()",
                postprocess=lambda x: x.strip().split('\n\n')))
        ]

    preprocessors = [
        (re.compile('(<h5>[^<]+</h5>)', re.I),
         r'</div>\1<div class="_imdbpy">'),
        (re.compile('(</table>\n</div>\s+)</div>', re.I), r'\1'),
        (re.compile('(<div id="tn15bot">)'), r'</div>\1'),
        (re.compile('<br/><br/>', re.I), r'\n\n')
        ]
def _build_episode(link, title, minfo, role, roleA, roleAID):
    """Build an Movie object for a given episode of a series.

    *minfo* is the free text following the episode link (usually the
    parenthesized air date); *role*/*roleA*/*roleAID* are alternative
    sources for the played character.
    """
    episode_id = analyze_imdbid(link)
    notes = u''
    minidx = minfo.find(' -')
    # Sometimes, for some unknown reason, the role is left in minfo.
    if minidx != -1:
        # +3 skips the separator — presumably ' - '; confirm against
        # the page markup.
        slfRole = minfo[minidx+3:].lstrip()
        minfo = minfo[:minidx].rstrip()
        if slfRole.endswith(')'):
            # A trailing parenthesized part of the role becomes the notes.
            commidx = slfRole.rfind('(')
            if commidx != -1:
                notes = slfRole[commidx:]
                slfRole = slfRole[:commidx]
        if slfRole and role is None and roleA is None:
            role = slfRole
    eps_data = analyze_title(title)
    eps_data['kind'] = u'episode'
    # FIXME: it's wrong for multiple characters (very rare on tv series?).
    if role is None:
        role = roleA # At worse, it's None.
    if role is None:
        roleAID = None
    if roleAID is not None:
        roleAID = analyze_imdbid(roleAID)
    e = Movie(movieID=episode_id, data=eps_data, currentRole=role,
              roleID=roleAID, notes=notes)
    # XXX: are we missing some notes?
    # XXX: does it parse things as "Episode dated 12 May 2005 (12 May 2005)"?
    if minfo.startswith('('):
        pe = minfo.find(')')
        if pe != -1:
            date = minfo[1:pe]
            if date != '????':
                e['original air date'] = date
                if eps_data.get('year', '????') == '????':
                    syear = date.split()[-1]
                    if syear.isdigit():
                        e['year'] = int(syear)
    return e
class DOMHTMLSeriesParser(DOMParserBase):
    """Parser for the "by TV series" page of a given person.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        sparser = DOMHTMLSeriesParser()
        result = sparser.parse(filmoseries_html_string)
    """
    _containsObjects = True

    extractors = [
        Extractor(label='series',
            # The group key is the series' <a> element itself; its HTML is
            # re-parsed in postprocess_data to recover link and title.
            group="//div[@class='filmo']/span[1]",
            group_key="./a[1]",
            path="./following-sibling::ol[1]/li/a[1]",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'link': "./@href",
                    'title': "./text()",
                    'info': "./following-sibling::text()",
                    'role': "./following-sibling::i[1]/text()",
                    'roleA': "./following-sibling::a[1]/text()",
                    'roleAID': "./following-sibling::a[1]/@href"
                    },
                postprocess=lambda x: _build_episode(x.get('link'),
                    x.get('title'),
                    (x.get('info') or u'').strip(),
                    x.get('role'),
                    x.get('roleA'),
                    x.get('roleAID'))))
        ]

    def postprocess_data(self, data):
        if len(data) == 0:
            return {}
        nd = {}
        for key in data.keys():
            # 'key' is a serialized <a> element: parse it back to get the
            # series' link and (quoted) title.
            dom = self.get_dom(key)
            link = self.xpath(dom, "//a/@href")[0]
            title = self.xpath(dom, "//a/text()")[0][1:-1]
            series = Movie(movieID=analyze_imdbid(link),
                           data=analyze_title(title),
                           accessSystem=self._as, modFunct=self._modFunct)
            nd[series] = []
            for episode in data[key]:
                # XXX: should we create a copy of 'series', to avoid
                #      circular references?
                episode['episode of'] = series
                nd[series].append(episode)
        return {'episodes': nd}
class DOMHTMLPersonGenresParser(DOMParserBase):
    """Parser for the "by genre" and "by keywords" pages of a given person.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        gparser = DOMHTMLPersonGenresParser()
        result = gparser.parse(bygenre_html_string)
    """
    # Result key; instantiated with kind='keywords' for the keywords page.
    kind = 'genres'
    _containsObjects = True

    extractors = [
        Extractor(label='genres',
            group="//b/a[@name]/following-sibling::a[1]",
            group_key="./text()",
            group_key_normalize=lambda x: x.lower(),
            path="../../following-sibling::ol[1]/li//a[1]",
            attrs=Attribute(key=None,
                multi=True,
                path={
                    'link': "./@href",
                    'title': "./text()",
                    'info': "./following-sibling::text()"
                    },
                postprocess=lambda x: \
                    build_movie(x.get('title') + \
                        x.get('info').split('[')[0],
                        analyze_imdbid(x.get('link')))))
        ]

    def postprocess_data(self, data):
        if len(data) == 0:
            return {}
        return {self.kind: data}
from movieParser import _parse_merchandising_link
class DOMHTMLPersonSalesParser(DOMParserBase):
    """Parser for the "merchandising links" page of a given person.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        sparser = DOMHTMLPersonSalesParser()
        result = sparser.parse(sales_html_string)
    """
    extractors = [
            Extractor(label='merchandising links',
                        # One group per merchandising section title.
                        group="//span[@class='merch_title']",
                        group_key=".//text()",
                        path="./following-sibling::table[1]/" \
                                "/td[@class='w_rowtable_colshop']//tr[1]",
                        attrs=Attribute(key=None,
                            multi=True,
                            path={
                                'link': "./td[2]/a[1]/@href",
                                'text': "./td[1]/img[1]/@alt",
                                'cover': "./ancestor::td[1]/../" \
                                        "td[1]/a[1]/img[1]/@src",
                                },
                            # Shared helper from movieParser: turns the raw
                            # link/text/cover values into the final dict.
                            postprocess=_parse_merchandising_link)),
            ]

    preprocessors = [
        # Expand self-closed anchors (<a name="..." />) into an explicit
        # open/close pair.
        (re.compile('(<a name="[^"]+" )/>', re.I), r'\1></a>')
        ]

    def postprocess_data(self, data):
        if len(data) == 0:
            return {}
        return {'merchandising links': data}
from movieParser import DOMHTMLTechParser
from movieParser import DOMHTMLOfficialsitesParser
from movieParser import DOMHTMLAwardsParser
from movieParser import DOMHTMLNewsParser
# Parser instances exported by this module: maps a parser name to a
# (parser classes tuple, extra attributes dict or None) pair.
_OBJECTS = {
    'maindetails_parser': ((DOMHTMLMaindetailsParser,), None),
    'bio_parser': ((DOMHTMLBioParser,), None),
    'otherworks_parser': ((DOMHTMLOtherWorksParser,), None),
    #'agent_parser': ((DOMHTMLOtherWorksParser,), {'kind': 'agent'}),
    'person_officialsites_parser': ((DOMHTMLOfficialsitesParser,), None),
    'person_awards_parser': ((DOMHTMLAwardsParser,), {'subject': 'name'}),
    'publicity_parser': ((DOMHTMLTechParser,), {'kind': 'publicity'}),
    'person_series_parser': ((DOMHTMLSeriesParser,), None),
    'person_contacts_parser': ((DOMHTMLTechParser,), {'kind': 'contacts'}),
    'person_genres_parser': ((DOMHTMLPersonGenresParser,), None),
    'person_keywords_parser': ((DOMHTMLPersonGenresParser,),
                               {'kind': 'keywords'}),
    'news_parser': ((DOMHTMLNewsParser,), None),
    'sales_parser': ((DOMHTMLPersonSalesParser,), None)
}
| [
[
8,
0,
0.025,
0.0483,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0519,
0.0018,
0,
0.66,
0.05,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0537,
0.0018,
0,
0.66,
... | [
"\"\"\"\nparser.http.personParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse\nthe IMDb pages on the akas.imdb.com server about a person.\nE.g., for \"Mel Gibson\" the referred pages would be:\n categorized: http://akas.imdb.com/name/nm0000154/maindetails\n ... |
"""
parser.http.searchMovieParser module (imdb package).
This module provides the HTMLSearchMovieParser class (and the
search_movie_parser instance), used to parse the results of a search
for a given title.
E.g., for when searching for the title "the passion", the parsed
page would be:
http://akas.imdb.com/find?q=the+passion&tt=on&mx=20
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.utils import analyze_title, build_title
from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
class DOMBasicMovieParser(DOMParserBase):
    """Simply get the title of a movie and the imdbID.

    It's used by the DOMHTMLSearchMovieParser class to return a result
    for a direct match (when a search on IMDb results in a single
    movie, the web server sends directly the movie page)."""
    # Stay generic enough to be used also for other DOMBasic*Parser classes.
    _titleAttrPath = ".//text()"
    _linkPath = "//link[@rel='canonical']"
    _titleFunct = lambda self, x: analyze_title(x or u'')

    def _init(self):
        # Normalize the "TV mini-series" marker to the "(mini)" notation
        # before the page is parsed.
        self.preprocessors += [('<span class="tv-extra">TV mini-series</span>',
                                '<span class="tv-extra">(mini)</span>')]
        self.extractors = [Extractor(label='title',
                                path="//h1",
                                attrs=Attribute(key='title',
                                        path=self._titleAttrPath,
                                        postprocess=self._titleFunct)),
                            Extractor(label='link',
                                path=self._linkPath,
                                attrs=Attribute(key='link', path="./@href",
                                        postprocess=lambda x: \
                                            analyze_imdbid((x or u'').replace(
                                                'http://pro.imdb.com', ''))
                                        ))]

    # Remove 'More at IMDb Pro' links.
    preprocessors = [(re.compile(r'<span class="pro-link".*?</span>'), ''),
                     (re.compile(r'<a href="http://ad.doubleclick.net.*?;id=(co[0-9]{7});'), r'<a href="http://pro.imdb.com/company/\1"></a>< a href="')]

    def postprocess_data(self, data):
        # Return a list with a single (link, info) pair, or an empty list
        # when either piece is missing.
        if not 'link' in data:
            data = []
        else:
            link = data.pop('link')
            if (link and data):
                data = [(link, data)]
            else:
                data = []
        return data
def custom_analyze_title(title):
    """Strip garbage notes left after the (year), (year/imdbIndex)
    or (year) (TV) part of a title."""
    # XXX: very crappy. :-(
    head = title.split(' ')[0]
    if head:
        title = head
    return analyze_title(title) if title else {}
# Manage AKAs.
# Matches em-quoted 'aka "<title>"' fragments, capturing everything up to
# (and including) the closing <br> or </td>.
_reAKAStitles = re.compile(r'(?:aka) <em>"(.*?)(<br>|<\/td>)', re.I | re.M)
class DOMHTMLSearchMovieParser(DOMParserBase):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, for movies."""

    # Hooks overridden by the person/company/keyword search subclasses.
    _BaseParser = DOMBasicMovieParser
    _notDirectHitTitle = '<title>imdb title'
    _titleBuilder = lambda self, x: build_title(x)
    _linkPrefix = '/title/tt'

    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'info': ".//text()",
                            #'akas': ".//div[@class='_imdbpyAKA']//text()"
                            'akas': ".//p[@class='find-aka']//text()"
                            },
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link') or u''),
                            custom_analyze_title(x.get('info') or u''),
                            x.get('akas')
                        ))]
    extractors = [Extractor(label='search',
                        path="//td[3]/a[starts-with(@href, '/title/tt')]/..",
                        attrs=_attrs)]

    def _init(self):
        self.url = u''

    def _reset(self):
        self.url = u''

    def preprocess_string(self, html_string):
        """Normalize a results page, or — for a direct hit — convert the
        single-movie page into a minimal fake results row."""
        if self._notDirectHitTitle in html_string[:1024].lower():
            if self._linkPrefix == '/title/tt':
                # Only for movies.
                html_string = html_string.replace('(TV mini-series)', '(mini)')
                # Mark AKA paragraphs with '::' so they can be split later.
                html_string = html_string.replace('<p class="find-aka">',
                        '<p class="find-aka">::')
            #html_string = _reAKAStitles.sub(
            #        r'<div class="_imdbpyAKA">\1::</div>\2', html_string)
            return html_string
        # Direct hit!  Parse the single page with the basic parser and
        # rebuild a one-row results table out of it.
        dbme = self._BaseParser(useModule=self._useModule)
        res = dbme.parse(html_string, url=self.url)
        if not res: return u''
        res = res['data']
        if not (res and res[0]): return u''
        link = '%s%s' % (self._linkPrefix, res[0][0])
        #    # Tries to cope with companies for which links to pro.imdb.com
        #    # are missing.
        #    link = self.url.replace(imdbURL_base[:-1], '')
        title = self._titleBuilder(res[0][1])
        if not (link and title): return u''
        link = link.replace('http://pro.imdb.com', '')
        new_html = '<td></td><td></td><td><a href="%s">%s</a></td>' % (link,
                                                                    title)
        return new_html

    def postprocess_data(self, data):
        """Truncate the result list to self.results (if set) and fold the
        collected AKA strings into each movie's info dictionary."""
        if not data.has_key('data'):
            data['data'] = []
        results = getattr(self, 'results', None)
        if results is not None:
            data['data'][:] = data['data'][:results]
        # Horrible hack to support AKAs.
        if data and data['data'] and len(data['data'][0]) == 3 and \
                isinstance(data['data'][0], tuple):
            for idx, datum in enumerate(data['data']):
                if not isinstance(datum, tuple):
                    continue
                if datum[2] is not None:
                    # The third element is a '::'-separated list of AKAs.
                    akas = filter(None, datum[2].split('::'))
                    if self._linkPrefix == '/title/tt':
                        akas = [a.replace('" - ', '::').rstrip() for a in akas]
                        akas = [a.replace('aka "', '', 1).lstrip() for a in akas]
                    datum[1]['akas'] = akas
                    data['data'][idx] = (datum[0], datum[1])
                else:
                    data['data'][idx] = (datum[0], datum[1])
        return data

    def add_refs(self, data):
        return data
# Parser instances exported by this module: maps a parser name to a
# (parser classes tuple, extra attributes dict or None) pair.
_OBJECTS = {
    'search_movie_parser': ((DOMHTMLSearchMovieParser,), None)
}
| [
[
8,
0,
0.0787,
0.1517,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1629,
0.0056,
0,
0.66,
0.125,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.1685,
0.0056,
0,
0.66,... | [
"\"\"\"\nparser.http.searchMovieParser module (imdb package).\n\nThis module provides the HTMLSearchMovieParser class (and the\nsearch_movie_parser instance), used to parse the results of a search\nfor a given title.\nE.g., for when searching for the title \"the passion\", the parsed\npage would be:",
"import re"... |
"""
parser.http.topBottomParser module (imdb package).
This module provides the classes (and the instances), used to parse the
lists of top 250 and bottom 100 movies.
E.g.:
http://akas.imdb.com/chart/top
http://akas.imdb.com/chart/bottom
Copyright 2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_title
from utils import DOMParserBase, Attribute, Extractor, analyze_imdbid
class DOMHTMLTop250Parser(DOMParserBase):
    """Parser for the "top 250" page.
    The page should be provided as a string, as taken from
    the akas.imdb.com server.  The final result will be a
    list of (movieID, info dictionary) pairs.

    Example:
        tparser = DOMHTMLTop250Parser()
        result = tparser.parse(top250_html_string)
    """
    label = 'top 250'
    ranktext = 'top 250 rank'

    def _init(self):
        self.extractors = [Extractor(label=self.label,
                            path="//div[@id='main']//table//tr",
                            attrs=Attribute(key=None,
                                multi=True,
                                path={self.ranktext: "./td[1]//text()",
                                      'rating': "./td[2]//text()",
                                      'title': "./td[3]//text()",
                                      'movieID': "./td[3]//a/@href",
                                      'votes': "./td[4]//text()"
                                      }))]

    def postprocess_data(self, data):
        """Turn the raw rows into a list of (movieID, info) pairs,
        skipping incomplete rows and duplicated movieIDs."""
        if not data or self.label not in data:
            return []
        mlist = []
        data = data[self.label]
        # Avoid duplicates.  A real fix, using XPath, would be desirable.
        # XXX: probably this is no more needed.
        seenIDs = set()     # set: O(1) membership (was a list: O(n))
        for d in data:
            if 'movieID' not in d: continue
            if self.ranktext not in d: continue
            if 'title' not in d: continue
            theID = analyze_imdbid(d['movieID'])
            if theID is None:
                continue
            theID = str(theID)
            if theID in seenIDs:
                continue
            seenIDs.add(theID)
            minfo = analyze_title(d['title'])
            # Rank/votes/rating come as text: ignore malformed values, but
            # - unlike the previous bare 'except:' - never swallow
            # KeyboardInterrupt/SystemExit.
            try: minfo[self.ranktext] = int(d[self.ranktext].replace('.', ''))
            except Exception: pass
            if 'votes' in d:
                try: minfo['votes'] = int(d['votes'].replace(',', ''))
                except Exception: pass
            if 'rating' in d:
                try: minfo['rating'] = float(d['rating'])
                except Exception: pass
            mlist.append((theID, minfo))
        return mlist
class DOMHTMLBottom100Parser(DOMHTMLTop250Parser):
    """Parser for the "bottom 100" page.

    Behaves exactly like DOMHTMLTop250Parser, but extracts the
    'bottom 100' chart instead of the 'top 250' one.

    Example:
        tparser = DOMHTMLBottom100Parser()
        result = tparser.parse(bottom100_html_string)
    """
    ranktext = 'bottom 100 rank'
    label = 'bottom 100'
# Parser instances exported by this module: maps a parser name to a
# (parser classes tuple, extra attributes dict or None) pair.
_OBJECTS = {
    'top250_parser': ((DOMHTMLTop250Parser,), None),
    'bottom100_parser': ((DOMHTMLBottom100Parser,), None)
}
| [
[
8,
0,
0.1226,
0.2358,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2547,
0.0094,
0,
0.66,
0.2,
896,
0,
1,
0,
0,
896,
0,
0
],
[
1,
0,
0.2642,
0.0094,
0,
0.66,
... | [
"\"\"\"\nparser.http.topBottomParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse the\nlists of top 250 and bottom 100 movies.\nE.g.:\n http://akas.imdb.com/chart/top\n http://akas.imdb.com/chart/bottom",
"from imdb.utils import analyze_title",
"from utils ... |
"""
parser.http.searchKeywordParser module (imdb package).
This module provides the HTMLSearchKeywordParser class (and the
search_keyword_parser instance), used to parse the results of a search
for a given keyword.
E.g., when searching for the keyword "alabama", the parsed page would be:
http://akas.imdb.com/find?s=kw;mx=20;q=alabama
Copyright 2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from utils import Extractor, Attribute, analyze_imdbid
from imdb.utils import analyze_title, analyze_company_name
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicKeywordParser(DOMBasicMovieParser):
    """Simply get the name of a keyword.

    It's used by the DOMHTMLSearchKeywordParser class to return a result
    for a direct match (when a search on IMDb results in a single
    keyword, the web server sends directly the keyword page).
    """
    # XXX: it's still to be tested!
    # I'm not even sure there can be a direct hit, searching for keywords.
    # NOTE(review): this reuses analyze_company_name to normalize the
    # keyword string - presumably adequate because keywords carry no
    # notes; confirm against an actual keyword page.
    _titleFunct = lambda self, x: analyze_company_name(x or u'')
class DOMHTMLSearchKeywordParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, searching for keywords similar to
    the one given."""

    # Hooks overriding DOMHTMLSearchMovieParser's defaults.
    _BaseParser = DOMBasicKeywordParser
    _notDirectHitTitle = '<title>imdb keyword'
    # Keywords are plain strings: nothing to rebuild.
    _titleBuilder = lambda self, x: x
    _linkPrefix = '/keyword/'

    _attrs = [Attribute(key='data',
                        multi=True,
                        path="./a[1]/text()"
                        )]
    extractors = [Extractor(label='search',
                        path="//td[3]/a[starts-with(@href, " \
                                "'/keyword/')]/..",
                        attrs=_attrs)]
def custom_analyze_title4kwd(title, yearNote, outline):
    """Build a movie-info dictionary from a title, a year note and a
    plot outline.  Return {} for a blank title."""
    stripped = title.strip()
    if not stripped:
        return {}
    if yearNote:
        # Keep only the first word of the note and close the parenthesis,
        # e.g. '(1999' -> '(1999)'.
        stripped += ' %s)' % yearNote.split(' ')[0]
    info = analyze_title(stripped)
    if outline:
        info['plot outline'] = outline
    return info
class DOMHTMLSearchMovieKeywordParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, searching for movies with the given
    keyword."""

    # A keyword-results page can never be a direct hit for a single movie.
    _notDirectHitTitle = '<title>best'

    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'info': "./a[1]//text()",
                            'ynote': "./span[@class='desc']/text()",
                            'outline': "./span[@class='outline']//text()"
                            },
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link') or u''),
                            custom_analyze_title4kwd(x.get('info') or u'',
                                                     x.get('ynote') or u'',
                                                     x.get('outline') or u'')
                        ))]

    extractors = [Extractor(label='search',
                        path="//td[3]/a[starts-with(@href, " \
                                "'/title/tt')]/..",
                        attrs=_attrs)]
# Parser instances exported by this module: maps a parser name to a
# (parser classes tuple, extra attributes dict or None) pair.
_OBJECTS = {
    'search_keyword_parser': ((DOMHTMLSearchKeywordParser,),
                {'kind': 'keyword', '_basic_parser': DOMBasicKeywordParser}),
    'search_moviekeyword_parser': ((DOMHTMLSearchMovieKeywordParser,), None)
}
| [
[
8,
0,
0.1171,
0.2252,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2432,
0.009,
0,
0.66,
0.125,
970,
0,
3,
0,
0,
970,
0,
0
],
[
1,
0,
0.2523,
0.009,
0,
0.66,
... | [
"\"\"\"\nparser.http.searchKeywordParser module (imdb package).\n\nThis module provides the HTMLSearchKeywordParser class (and the\nsearch_company_parser instance), used to parse the results of a search\nfor a given keyword.\nE.g., when searching for the keyword \"alabama\", the parsed page would be:\n http://ak... |
"""
parser.http.searchCompanyParser module (imdb package).
This module provides the HTMLSearchCompanyParser class (and the
search_company_parser instance), used to parse the results of a search
for a given company.
E.g., when searching for the name "Columbia Pictures", the parsed page would be:
http://akas.imdb.com/find?s=co;mx=20;q=Columbia+Pictures
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_company_name, build_company_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCompanyParser(DOMBasicMovieParser):
    """Simply get the name of a company and the imdbID.

    It's used by the DOMHTMLSearchCompanyParser class to return a result
    for a direct match (when a search on IMDb results in a single
    company, the web server sends directly the company page).
    """
    _titleFunct = lambda self, x: analyze_company_name(x or u'')
class DOMHTMLSearchCompanyParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, for companies."""
    _BaseParser = DOMBasicCompanyParser
    _notDirectHitTitle = '<title>imdb company'
    _titleBuilder = lambda self, x: build_company_name(x)
    _linkPrefix = '/company/co'

    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'name': "./a[1]/text()",
                            'notes': "./text()[1]"
                            },
                        # x.get() returns None for missing keys; guard
                        # 'name' with u'' too (only 'notes' was guarded
                        # before, so a nameless row raised TypeError),
                        # matching the person/movie parsers' style.
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link')),
                            analyze_company_name((x.get('name') or u'') +
                                                 (x.get('notes') or u''),
                                                 stripNotes=True)
                        ))]
    extractors = [Extractor(label='search',
                        path="//td[3]/a[starts-with(@href, " \
                                "'/company/co')]/..",
                        attrs=_attrs)]
# Parser instances exported by this module: maps a parser name to a
# (parser classes tuple, extra attributes dict or None) pair.
_OBJECTS = {
    'search_company_parser': ((DOMHTMLSearchCompanyParser,),
                {'kind': 'company', '_basic_parser': DOMBasicCompanyParser})
}
| [
[
8,
0,
0.1901,
0.3662,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3944,
0.0141,
0,
0.66,
0.1667,
896,
0,
2,
0,
0,
896,
0,
0
],
[
1,
0,
0.4085,
0.0141,
0,
0.66... | [
"\"\"\"\nparser.http.searchCompanyParser module (imdb package).\n\nThis module provides the HTMLSearchCompanyParser class (and the\nsearch_company_parser instance), used to parse the results of a search\nfor a given company.\nE.g., when searching for the name \"Columbia Pictures\", the parsed page would be:\n ht... |
"""
parser.http.searchPersonParser module (imdb package).
This module provides the HTMLSearchPersonParser class (and the
search_person_parser instance), used to parse the results of a search
for a given person.
E.g., when searching for the name "Mel Gibson", the parsed page would be:
http://akas.imdb.com/find?q=Mel+Gibson&nm=on&mx=20
Copyright 2004-2010 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
def _cleanName(n):
    """Return the person name found in a title tag, minus known
    boilerplate text; an empty unicode string for a false value."""
    if not n:
        return u''
    # FIXME: temporary workaround for extra text in the <title> tag.
    return n.replace('Filmography by type for', '')
class DOMBasicPersonParser(DOMBasicMovieParser):
    """Simply get the name of a person and the imdbID.

    It's used by the DOMHTMLSearchPersonParser class to return a result
    for a direct match (when a search on IMDb results in a single
    person, the web server sends directly the person page)."""
    _titleFunct = lambda self, x: analyze_name(_cleanName(x), canonical=1)
_reAKASp = re.compile(r'(?:aka|birth name) (<em>")(.*?)"(<br>|<\/em>|<\/td>)',
re.I | re.M)
class DOMHTMLSearchPersonParser(DOMHTMLSearchMovieParser):
    """Parse the html page that the IMDb web server shows when the
    "new search system" is used, for persons."""
    # Hooks overriding DOMHTMLSearchMovieParser's defaults.
    _BaseParser = DOMBasicPersonParser
    _notDirectHitTitle = '<title>imdb name'
    _titleBuilder = lambda self, x: build_name(x, canonical=True)
    _linkPrefix = '/name/nm'

    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'name': "./a[1]/text()",
                            'index': "./text()[1]",
                            'akas': ".//div[@class='_imdbpyAKA']/text()"
                            },
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link') or u''),
                            analyze_name((x.get('name') or u'') + \
                                        (x.get('index') or u''),
                                        canonical=1), x.get('akas')
                        ))]
    extractors = [Extractor(label='search',
                        path="//td[3]/a[starts-with(@href, '/name/nm')]/..",
                        attrs=_attrs)]

    def preprocess_string(self, html_string):
        # On a results page (not a direct hit), wrap AKA/birth-name
        # fragments in a recognizable div so the extractor can pick them
        # up; then fall back to the common preprocessing.
        if self._notDirectHitTitle in html_string[:1024].lower():
            html_string = _reAKASp.sub(
                                r'\1<div class="_imdbpyAKA">\2::</div>\3',
                                html_string)
        return DOMHTMLSearchMovieParser.preprocess_string(self, html_string)
# Parser instances exported by this module: maps a parser name to a
# (parser classes tuple, extra attributes dict or None) pair.
_OBJECTS = {
    'search_person_parser': ((DOMHTMLSearchPersonParser,),
                {'kind': 'person', '_basic_parser': DOMBasicPersonParser})
}
| [
[
8,
0,
0.1467,
0.2826,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3043,
0.0109,
0,
0.66,
0.1111,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.3152,
0.0109,
0,
0.66... | [
"\"\"\"\nparser.http.searchPersonParser module (imdb package).\n\nThis module provides the HTMLSearchPersonParser class (and the\nsearch_person_parser instance), used to parse the results of a search\nfor a given person.\nE.g., when searching for the name \"Mel Gibson\", the parsed page would be:\n http://akas.i... |
"""
parser.http.bsouplxml.etree module (imdb.parser.http package).
This module adapts the beautifulsoup interface to lxml.etree module.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import _bsoup as BeautifulSoup
from _bsoup import Tag as Element
import bsoupxpath
# Not directly used by IMDbPY, but do not remove: it's used by IMDbPYKit,
# for example.
def fromstring(xml_string):
    """Return a DOM representation of the string."""
    # Entities are deliberately NOT converted (convertEntities=None),
    # instead of using BeautifulSoup.BeautifulStoneSoup.XML_ENTITIES.
    soup = BeautifulSoup.BeautifulStoneSoup(xml_string, convertEntities=None)
    return soup.findChild(True)
def tostring(element, encoding=None, pretty_print=False):
    """Return a string or unicode representation of an element.

    Mirrors lxml.etree.tostring(); pass `encoding=unicode` (the py2 text
    type) to get a unicode object back.
    """
    if encoding is unicode:
        # NOTE(review): presumably Tag.__str__ returns unicode when
        # encoding is None - confirm against the _bsoup implementation.
        encoding = None
    # For BeautifulSoup 3.1
    #encArgs = {'prettyPrint': pretty_print}
    #if encoding is not None:
    #    encArgs['encoding'] = encoding
    #return element.encode(**encArgs)
    return element.__str__(encoding, pretty_print)
def setattribute(tag, name, value):
    """Set the attribute `name` of `tag` to `value` (dict-style access);
    grafted onto BeautifulSoup.Tag as the lxml-like `set` method."""
    tag[name] = value
def xpath(node, expr):
    """Apply an xpath expression to a node.  Return a list of nodes."""
    parsed_path = bsoupxpath.get_path(expr)
    return parsed_path.apply(node)
# XXX: monkey patching the beautifulsoup tag class
class _EverythingIsNestable(dict):
    """Fake that every tag is nestable: whatever tag name is looked up,
    report that nothing restricts its nesting."""
    def get(self, key, *args, **kwds):
        return list()
# Graft an lxml.etree-like interface onto the BeautifulSoup classes, so
# the rest of the package can use either backend transparently.
BeautifulSoup.BeautifulStoneSoup.NESTABLE_TAGS = _EverythingIsNestable()
BeautifulSoup.Tag.tag = property(fget=lambda self: self.name)
BeautifulSoup.Tag.attrib = property(fget=lambda self: self)
BeautifulSoup.Tag.text = property(fget=lambda self: self.string)
BeautifulSoup.Tag.set = setattribute
BeautifulSoup.Tag.getparent = lambda self: self.parent
BeautifulSoup.Tag.drop_tree = BeautifulSoup.Tag.extract
BeautifulSoup.Tag.xpath = xpath
# TODO: setting the text attribute for tags
| [
[
8,
0,
0.1533,
0.2933,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.32,
0.0133,
0,
0.66,
0.0625,
443,
0,
1,
0,
0,
443,
0,
0
],
[
1,
0,
0.3333,
0.0133,
0,
0.66,
... | [
"\"\"\"\nparser.http.bsouplxml.etree module (imdb.parser.http package).\n\nThis module adapts the beautifulsoup interface to lxml.etree module.\n\nCopyright 2008 H. Turgut Uyar <uyar@tekir.org>\n 2008 Davide Alberani <da@erlug.linux.it>",
"import _bsoup as BeautifulSoup",
"from _bsoup import Tag as Ele... |
"""
parser.http.bsoupxpath module (imdb.parser.http package).
This module provides XPath support for BeautifulSoup.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
__author__ = 'H. Turgut Uyar <uyar@tekir.org>'
__docformat__ = 'restructuredtext'
import re
import string
import _bsoup as BeautifulSoup
# XPath related enumerations and constants

# The subset of XPath axes supported by this implementation.
AXIS_ANCESTOR = 'ancestor'
AXIS_ATTRIBUTE = 'attribute'
AXIS_CHILD = 'child'
AXIS_DESCENDANT = 'descendant'
AXIS_FOLLOWING = 'following'
AXIS_FOLLOWING_SIBLING = 'following-sibling'
AXIS_PRECEDING_SIBLING = 'preceding-sibling'
AXES = (AXIS_ANCESTOR, AXIS_ATTRIBUTE, AXIS_CHILD, AXIS_DESCENDANT,
        AXIS_FOLLOWING, AXIS_FOLLOWING_SIBLING, AXIS_PRECEDING_SIBLING)

# The subset of XPath functions supported in predicates.
XPATH_FUNCTIONS = ('starts-with', 'string-length')
def tokenize_path(path):
    """Tokenize a location path into location steps.  Return the list of
    steps.

    If two steps are separated by a double slash, the double slashes are
    part of the second step.  If they are separated by only one slash,
    the slash is not included in any of the steps.
    """
    # Collect (start, end) spans marking each step in the path string.
    spans = []
    start = 0
    pos = -1
    inside_quote = False
    while pos < len(path) - 1:
        pos += 1
        ch = path[pos]
        if ch == "'":
            inside_quote = not inside_quote
        if inside_quote:
            # slashes within strings are not step separators
            continue
        if ch == '/':
            if pos > 0:
                spans.append((start, pos))
            if path[pos + 1] == '/':
                # keep the '//' with the following step
                start = pos
                pos += 1
            else:
                start = pos + 1
    spans.append((start, len(path)))
    return [path[s:e] for s, e in spans]
class Path:
    """A location path.

    Parses (unless `parse` is false) the given XPath string into a list
    of PathStep objects that can later be applied to a node.
    """
    def __init__(self, path, parse=True):
        self.path = path
        self.steps = []
        if parse:
            if (path[0] == '/') and (path[1] != '/'):
                # if not on the descendant axis, remove the leading slash
                path = path[1:]
            steps = tokenize_path(path)
            for step in steps:
                self.steps.append(PathStep(step))

    def apply(self, node):
        """Apply the path to a node.  Return the resulting list of nodes.

        Apply the steps in the path sequentially by sending the output of
        each step as input to the next step.
        """
        # FIXME: this should return a node SET, not a node LIST
        # or at least a list with no duplicates
        if self.path[0] == '/':
            # for an absolute path, start from the root
            if not isinstance(node, BeautifulSoup.Tag) \
               or (node.name != '[document]'):
                node = node.findParent('[document]')
        nodes = [node]
        for step in self.steps:
            nodes = step.apply(nodes)
        return nodes
class PathStep:
    """A location step in a location path.

    A step has the form [axis::]node-test[predicate]*; it is parsed once
    at construction time and can then be applied to lists of nodes.
    """

    # Regular expressions for parsing "axis::node-test[predicate]" steps.
    AXIS_PATTERN = r"""(%s)::|@""" % '|'.join(AXES)
    NODE_TEST_PATTERN = r"""\w+(\(\))?"""
    PREDICATE_PATTERN = r"""\[(.*?)\]"""
    LOCATION_STEP_PATTERN = r"""(%s)?(%s)((%s)*)""" \
            % (AXIS_PATTERN, NODE_TEST_PATTERN, PREDICATE_PATTERN)
    _re_location_step = re.compile(LOCATION_STEP_PATTERN)

    # Regular expressions for parsing the predicates themselves.
    PREDICATE_NOT_PATTERN = r"""not\((.*?)\)"""
    PREDICATE_AXIS_PATTERN = r"""(%s)?(%s)(='(.*?)')?""" \
            % (AXIS_PATTERN, NODE_TEST_PATTERN)
    PREDICATE_FUNCTION_PATTERN = r"""(%s)\(([^,]+(,\s*[^,]+)*)?\)(=(.*))?""" \
            % '|'.join(XPATH_FUNCTIONS)
    _re_predicate_not = re.compile(PREDICATE_NOT_PATTERN)
    _re_predicate_axis = re.compile(PREDICATE_AXIS_PATTERN)
    _re_predicate_function = re.compile(PREDICATE_FUNCTION_PATTERN)

    def __init__(self, step):
        """Parse a location step string into axis, node test, optional
        index and predicate checkers."""
        self.step = step
        if (step == '.') or (step == '..'):
            return
        if step[:2] == '//':
            default_axis = AXIS_DESCENDANT
            step = step[2:]
        else:
            default_axis = AXIS_CHILD
        step_match = self._re_location_step.match(step)
        # determine the axis
        axis = step_match.group(1)
        if axis is None:
            self.axis = default_axis
        elif axis == '@':
            self.axis = AXIS_ATTRIBUTE
        else:
            self.axis = step_match.group(2)
        # Arguments passed through to the BeautifulSoup find* methods.
        self.soup_args = {}
        # Position to select from the found nodes (None: keep all).
        self.index = None
        self.node_test = step_match.group(3)
        if self.node_test == 'text()':
            self.soup_args['text'] = True
        else:
            self.soup_args['name'] = self.node_test
        self.checkers = []
        predicates = step_match.group(5)
        if predicates is not None:
            # Split '[p1][p2]...' into individual predicate strings.
            predicates = [p for p in predicates[1:-1].split('][') if p]
            for predicate in predicates:
                checker = self.__parse_predicate(predicate)
                if checker is not None:
                    self.checkers.append(checker)

    def __parse_predicate(self, predicate):
        """Parse the predicate.  Return a callable that can be used to
        filter nodes.  Update `self.soup_args` to take advantage of
        BeautifulSoup search features.
        """
        try:
            # A bare number is a positional predicate.
            position = int(predicate)
            if self.axis == AXIS_DESCENDANT:
                return PredicateFilter('position', value=position)
            else:
                # use the search limit feature instead of a checker
                self.soup_args['limit'] = position
                self.index = position - 1
                return None
        except ValueError:
            pass

        if predicate == "last()":
            self.index = -1
            return None

        negate = self._re_predicate_not.match(predicate)
        if negate:
            predicate = negate.group(1)

        function_match = self._re_predicate_function.match(predicate)
        if function_match:
            name = function_match.group(1)
            arguments = function_match.group(2)
            value = function_match.group(4)
            if value is not None:
                value = function_match.group(5)
            return PredicateFilter(name, arguments, value)

        axis_match = self._re_predicate_axis.match(predicate)
        if axis_match:
            axis = axis_match.group(1)
            if axis is None:
                axis = AXIS_CHILD
            elif axis == '@':
                axis = AXIS_ATTRIBUTE
            if axis == AXIS_ATTRIBUTE:
                # use the attribute search feature instead of a checker
                attribute_name = axis_match.group(3)
                if axis_match.group(5) is not None:
                    attribute_value = axis_match.group(6)
                elif not negate:
                    attribute_value = True
                else:
                    attribute_value = None
                if not self.soup_args.has_key('attrs'):
                    self.soup_args['attrs'] = {}
                self.soup_args['attrs'][attribute_name] = attribute_value
                return None
            elif axis == AXIS_CHILD:
                node_test = axis_match.group(3)
                node_value = axis_match.group(6)
                return PredicateFilter('axis', node_test, value=node_value,
                                       negate=negate)

        raise NotImplementedError("This predicate is not implemented")

    def apply(self, nodes):
        """Apply the step to a list of nodes.  Return the list of nodes
        for the next step.
        """
        if self.step == '.':
            return nodes
        elif self.step == '..':
            return [node.parent for node in nodes]

        result = []
        for node in nodes:
            # Dispatch on the axis to the matching BeautifulSoup search.
            if self.axis == AXIS_CHILD:
                found = node.findAll(recursive=False, **self.soup_args)
            elif self.axis == AXIS_DESCENDANT:
                found = node.findAll(recursive=True, **self.soup_args)
            elif self.axis == AXIS_ATTRIBUTE:
                try:
                    found = [node[self.node_test]]
                except KeyError:
                    found = []
            elif self.axis == AXIS_FOLLOWING_SIBLING:
                found = node.findNextSiblings(**self.soup_args)
            elif self.axis == AXIS_PRECEDING_SIBLING:
                # TODO: make sure that the result is reverse ordered
                found = node.findPreviousSiblings(**self.soup_args)
            elif self.axis == AXIS_FOLLOWING:
                # find the last descendant of this node
                last = node
                while (not isinstance(last, BeautifulSoup.NavigableString)) \
                      and (len(last.contents) > 0):
                    last = last.contents[-1]
                found = last.findAllNext(**self.soup_args)
            elif self.axis == AXIS_ANCESTOR:
                found = node.findParents(**self.soup_args)

            # this should only be active if there is a position predicate
            # and the axis is not 'descendant'
            if self.index is not None:
                if found:
                    if len(found) > self.index:
                        found = [found[self.index]]
                    else:
                        found = []

            if found:
                for checker in self.checkers:
                    found = filter(checker, found)
                result.extend(found)

        return result
class PredicateFilter:
    """A callable that keeps or rejects a node according to an XPath
    predicate.

    Supported predicate kinds (the `name` constructor argument):
      - 'position':      positional test, e.g. li[3]
      - 'axis':          child/attribute comparison, e.g. [b='x']
      - 'starts-with':   starts-with(arg, 'prefix')
      - 'string-length': string-length(arg) = N
    """

    def __init__(self, name, arguments=None, value=None, negate=False):
        """Prepare the filter.

        name      -- the predicate kind (see class docstring)
        arguments -- raw argument string of the predicate function
        value     -- comparison value, when the predicate has one
        negate    -- truthy when the predicate was wrapped in not(...)

        Raises NotImplementedError for unsupported predicate names.
        """
        self.name = name
        self.arguments = arguments
        self.negate = negate
        if name == 'position':
            self.__filter = self.__position
            self.value = value
        elif name == 'axis':
            self.__filter = self.__axis
            self.node_test = arguments
            self.value = value
        elif name == 'starts-with':
            self.__filter = self.__starts_with
            # str.strip replaces the deprecated string.strip function;
            # the result is the same for plain strings
            args = [arg.strip() for arg in arguments.split(',')]
            if args[0][0] == '@':
                # (is_attribute, attribute name, prefix without quotes)
                self.arguments = (True, args[0][1:], args[1][1:-1])
            else:
                self.arguments = (False, args[0], args[1][1:-1])
        elif name == 'string-length':
            self.__filter = self.__string_length
            args = [arg.strip() for arg in arguments.split(',')]
            if args[0][0] == '@':
                self.arguments = (True, args[0][1:])
            else:
                self.arguments = (False, args[0])
            self.value = int(value)
        else:
            raise NotImplementedError("This XPath function is not implemented")

    def __call__(self, node):
        """Return whether `node` satisfies (or, if negated, violates)
        the predicate."""
        if self.negate:
            return not self.__filter(node)
        else:
            return self.__filter(node)

    def __position(self, node):
        # 1-based position among same-type siblings, as in XPath
        if isinstance(node, BeautifulSoup.NavigableString):
            actual_position = len(node.findPreviousSiblings(text=True)) + 1
        else:
            actual_position = len(node.findPreviousSiblings(node.name)) + 1
        return actual_position == self.value

    def __axis(self, node):
        if self.node_test == 'text()':
            return node.string == self.value
        else:
            children = node.findAll(self.node_test, recursive=False)
            if len(children) > 0 and self.value is None:
                # bare existence test, e.g. [b]
                return True
            for child in children:
                if child.string == self.value:
                    return True
            return False

    def __starts_with(self, node):
        if self.arguments[0]:
            # this is an attribute
            attribute_name = self.arguments[1]
            if node.has_key(attribute_name):
                first = node[attribute_name]
                return first.startswith(self.arguments[2])
        elif self.arguments[1] == 'text()':
            first = node.contents[0]
            if isinstance(first, BeautifulSoup.NavigableString):
                return first.startswith(self.arguments[2])
        return False

    def __string_length(self, node):
        # BUG FIX: `value` used to be unbound (NameError at runtime) when
        # the argument was neither an attribute nor text(); initialize it
        # so such predicates simply evaluate to False.
        value = None
        if self.arguments[0]:
            # this is an attribute
            attribute_name = self.arguments[1]
            if node.has_key(attribute_name):
                value = node[attribute_name]
        elif self.arguments[1] == 'text()':
            value = node.string
        if value is not None:
            return len(value) == self.value
        return False
# Module-level caches so that identical path/step strings are parsed once.
_paths = {}
_steps = {}

def get_path(path):
    """Return a (cached) Path object for the given XPath string.

    Both whole paths and individual steps are cached, eliminating
    repeated parsings of the same expressions.
    """
    # `in` replaces dict.has_key: equivalent here, idiomatic, and also
    # valid under Python 3
    if path not in _paths:
        p = Path(path, parse=False)
        for step in tokenize_path(path):
            if step not in _steps:
                _steps[step] = PathStep(step)
            p.steps.append(_steps[step])
        _paths[path] = p
    return _paths[path]
| [
[
8,
0,
0.0279,
0.0533,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0584,
0.0025,
0,
0.66,
0.0476,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0609,
0.0025,
0,
0.66... | [
"\"\"\"\nparser.http.bsoupxpath module (imdb.parser.http package).\n\nThis module provides XPath support for BeautifulSoup.\n\nCopyright 2008 H. Turgut Uyar <uyar@tekir.org>\n\nThis program is free software; you can redistribute it and/or modify",
"__author__ = 'H. Turgut Uyar <uyar@tekir.org>'",
"__docformat__... |
"""
parser.http.bsouplxml.html module (imdb.parser.http package).
This module adapts the beautifulsoup interface to lxml.html module.
Copyright 2008 H. Turgut Uyar <uyar@tekir.org>
2008 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import _bsoup as BeautifulSoup
def fromstring(html_string):
    """Parse `html_string` with BeautifulSoup and return the root element
    of the resulting tree (an lxml.html.fromstring work-alike)."""
    parser_class = BeautifulSoup.BeautifulSoup
    soup = parser_class(html_string,
                        convertEntities=parser_class.HTML_ENTITIES)
    return soup.findChild(True)
| [
[
8,
0,
0.371,
0.7097,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7742,
0.0323,
0,
0.66,
0.5,
443,
0,
1,
0,
0,
443,
0,
0
],
[
2,
0,
0.9355,
0.1613,
0,
0.66,
... | [
"\"\"\"\nparser.http.bsouplxml.html module (imdb.parser.http package).\n\nThis module adapts the beautifulsoup interface to lxml.html module.\n\nCopyright 2008 H. Turgut Uyar <uyar@tekir.org>\n 2008 Davide Alberani <da@erlug.linux.it>",
"import _bsoup as BeautifulSoup",
"def fromstring(html_string):\n ... |
"""
parser.http.companyParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a company.
E.g., for "Columbia Pictures [us]" the referred page would be:
main details: http://akas.imdb.com/company/co0071509/
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from utils import build_movie, Attribute, Extractor, DOMParserBase, \
analyze_imdbid
from imdb.utils import analyze_company_name
class DOMCompanyParser(DOMParserBase):
    """Parser for the main page of a given company.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.

    Example:
        cparser = DOMCompanyParser()
        result = cparser.parse(company_html_string)
    """
    _containsObjects = True

    extractors = [
            Extractor(label='name',
                        path="//title",
                        attrs=Attribute(key='name',
                            path="./text()",
                            postprocess=lambda x: \
                                    analyze_company_name(x, stripNotes=True))),

            Extractor(label='filmography',
                        group="//b/a[@name]",
                        group_key="./text()",
                        group_key_normalize=lambda x: x.lower(),
                        path="../following-sibling::ol[1]/li",
                        attrs=Attribute(key=None,
                            multi=True,
                            path={
                                'link': "./a[1]/@href",
                                'title': "./a[1]/text()",
                                'year': "./text()[1]"
                                },
                            postprocess=lambda x:
                                build_movie(u'%s %s' % \
                                (x.get('title'), x.get('year').strip()),
                                movieID=analyze_imdbid(x.get('link') or u''),
                                _parsingCompany=True))),
            ]

    # close the open paragraph before every section anchor, so that the
    # "../following-sibling::ol[1]" path above addresses the right list
    preprocessors = [
        (re.compile('(<b><a name=)', re.I), r'</p>\1')
        ]

    def postprocess_data(self, data):
        """Rename the filmography sections to the (plural) key names used
        across the package."""
        # iterate over a snapshot of the keys: the dictionary is modified
        # inside the loop, which is only safe when keys() is a real list
        for key in list(data.keys()):
            new_key = key.replace('company', 'companies')
            new_key = new_key.replace('other', 'miscellaneous')
            new_key = new_key.replace('distributor', 'distributors')
            if new_key != key:
                data[new_key] = data[key]
                del data[key]
        return data
# NOTE(review): _OBJECTS maps parser names to (classes, keyword-arguments)
# pairs; presumably collected by the http package's loader -- confirm there.
_OBJECTS = {
    'company_main_parser': ((DOMCompanyParser,), None)
}
| [
[
8,
0,
0.1429,
0.2747,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2967,
0.011,
0,
0.66,
0.2,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.3132,
0.022,
0,
0.66,
... | [
"\"\"\"\nparser.http.companyParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse\nthe IMDb pages on the akas.imdb.com server about a company.\nE.g., for \"Columbia Pictures [us]\" the referred page would be:\n main details: http://akas.imdb.com/company/co0071509/... |
"""
parser.http.characterParser module (imdb package).
This module provides the classes (and the instances), used to parse
the IMDb pages on the akas.imdb.com server about a character.
E.g., for "Jesse James" the referred pages would be:
main details: http://www.imdb.com/character/ch0000001/
biography: http://www.imdb.com/character/ch0000001/bio
...and so on...
Copyright 2007-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import re
from utils import Attribute, Extractor, DOMParserBase, build_movie, \
analyze_imdbid
from personParser import DOMHTMLMaindetailsParser
from imdb.Movie import Movie
# extracts the 7-digit personID from "/name/nm0000001"-style links;
# used by the filmography attributes below to collect roleIDs
_personIDs = re.compile(r'/name/nm([0-9]{7})')
class DOMHTMLCharacterMaindetailsParser(DOMHTMLMaindetailsParser):
    """Parser for the "filmography" page of a given character.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        bparser = DOMHTMLCharacterMaindetailsParser()
        result = bparser.parse(character_biography_html_string)
    """
    _containsObjects = True
    # attribute spec shared by the flat and the sectioned filmography
    # extractors below; builds Movie objects from every <li> entry
    _film_attrs = [Attribute(key=None,
                      multi=True,
                      path={
                          'link': "./a[1]/@href",
                          'title': ".//text()",
                          'status': "./i/a//text()",
                          'roleID': "./a/@href"
                          },
                      postprocess=lambda x:
                          build_movie(x.get('title') or u'',
                              movieID=analyze_imdbid(x.get('link') or u''),
                              roleID=_personIDs.findall(x.get('roleID') or u''),
                              status=x.get('status') or None,
                              _parsingCharacter=True))]
    extractors = [
            # page title, stripped of the IMDb decorations
            Extractor(label='title',
                        path="//title",
                        attrs=Attribute(key='name',
                            path="./text()",
                            postprocess=lambda x: \
                                x.replace(' (Character)', '').replace(
                                    '- Filmography by type', '').strip())),
            Extractor(label='headshot',
                        path="//a[@name='headshot']",
                        attrs=Attribute(key='headshot',
                            path="./img/@src")),
            Extractor(label='akas',
                        path="//div[h5='Alternate Names:']",
                        attrs=Attribute(key='akas',
                            path="./div//text()",
                            postprocess=lambda x: x.strip().split(' / '))),
            # filmography entries outside any titled section
            Extractor(label='filmography',
                        path="//div[@class='filmo'][not(h5)]/ol/li",
                        attrs=_film_attrs),
            # filmography entries grouped under a lower-cased section
            # title (the trailing ':' is dropped by [:-1])
            Extractor(label='filmography sections',
                        group="//div[@class='filmo'][h5]",
                        group_key="./h5/a/text()",
                        group_key_normalize=lambda x: x.lower()[:-1],
                        path="./ol/li",
                        attrs=_film_attrs),
            ]
    preprocessors = [
        # Check that this doesn't cut "status"...
        (re.compile(r'<br>(\.\.\.| ).+?</li>', re.I | re.M), '</li>')]
class DOMHTMLCharacterBioParser(DOMParserBase):
    """Parser for the "biography" page of a given character.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        bparser = DOMHTMLCharacterBioParser()
        result = bparser.parse(character_biography_html_string)
    """
    # NOTE(review): _defGetRefs presumably enables collection of
    # references inside the parsed texts -- confirm in DOMParserBase
    _defGetRefs = True
    extractors = [
        # the sections addressed here ('_intro', '_biography') are
        # synthetic markers injected by the preprocessors below
        Extractor(label='introduction',
                    path="//div[@id='_intro']",
                    attrs=Attribute(key='introduction',
                        path=".//text()",
                        postprocess=lambda x: x.strip())),
        Extractor(label='biography',
                    path="//span[@class='_biography']",
                    attrs=Attribute(key='biography',
                        multi=True,
                        path={
                            'info': "./preceding-sibling::h4[1]//text()",
                            'text': ".//text()"
                        },
                        # '||' (inserted below for <br/><br/> pairs) is
                        # turned back into paragraph breaks here
                        postprocess=lambda x: u'%s: %s' % (
                            x.get('info').strip(),
                            x.get('text').replace('\n',
                                ' ').replace('||', '\n\n').strip()))),
    ]
    # inject the '_intro'/'_biography' markers used by the extractors,
    # and encode paragraph breaks as '||'
    preprocessors = [
        (re.compile('(<div id="swiki.2.3.1">)', re.I), r'\1<div id="_intro">'),
        (re.compile('(<a name="history">)\s*(<table .*?</table>)',
                    re.I | re.DOTALL),
         r'</div>\2\1</a>'),
        (re.compile('(<a name="[^"]+">)(<h4>)', re.I), r'</span>\1</a>\2'),
        (re.compile('(</h4>)</a>', re.I), r'\1<span class="_biography">'),
        (re.compile('<br/><br/>', re.I), r'||'),
        (re.compile('\|\|\n', re.I), r'</span>'),
    ]
class DOMHTMLCharacterQuotesParser(DOMParserBase):
    """Parser for the "quotes" page of a given character.
    The page should be provided as a string, as taken from
    the akas.imdb.com server. The final result will be a
    dictionary, with a key for every relevant section.
    Example:
        qparser = DOMHTMLCharacterQuotesParser()
        result = qparser.parse(character_quotes_html_string)
    """
    # NOTE(review): _defGetRefs presumably enables collection of
    # references inside the parsed texts -- confirm in DOMParserBase
    _defGetRefs = True
    extractors = [
        # one group of quotes per movie title (the <h5> heading); the
        # value is a (movieID, [quote strings]) pair, where '||' and '::'
        # are the markers inserted by the preprocessors below
        Extractor(label='charquotes',
                    group="//h5",
                    group_key="./a/text()",
                    path="./following-sibling::div[1]",
                    attrs=Attribute(key=None,
                        path={'txt': ".//text()",
                              'movieID': ".//a[1]/@href"},
                        postprocess=lambda x: (analyze_imdbid(x['movieID']),
                            x['txt'].strip().replace(': ',
                            ': ').replace(': ', ': ').split('||'))))
    ]
    # wrap each quote block in a <div>, encode quote separators as '||'
    # and in-quote line breaks as '::'
    preprocessors = [
        (re.compile('(</h5>)', re.I), r'\1<div>'),
        (re.compile('\s*<br/><br/>\s*', re.I), r'||'),
        (re.compile('\|\|\s*(<hr/>)', re.I), r'</div>\1'),
        (re.compile('\s*<br/>\s*', re.I), r'::')
    ]
    def postprocess_data(self, data):
        # Re-key the result on Movie objects and split every quote into
        # its lines (the '::' marker inserted by the preprocessors).
        if not data:
            return {}
        newData = {}
        for title in data:
            movieID, quotes = data[title]
            if movieID is None:
                # no movie link was found: keep the raw title as key
                movie = title
            else:
                movie = Movie(title=title, movieID=movieID,
                              accessSystem=self._as, modFunct=self._modFunct)
            newData[movie] = [quote.split('::') for quote in quotes]
        return {'quotes': newData}
# the series parser is reused from the person pages
from personParser import DOMHTMLSeriesParser

# NOTE(review): _OBJECTS maps parser names to (classes, keyword-arguments)
# pairs; presumably collected by the http package's loader -- confirm there.
_OBJECTS = {
    'character_main_parser': ((DOMHTMLCharacterMaindetailsParser,),
                                {'kind': 'character'}),
    'character_series_parser': ((DOMHTMLSeriesParser,), None),
    'character_bio_parser': ((DOMHTMLCharacterBioParser,), None),
    'character_quotes_parser': ((DOMHTMLCharacterQuotesParser,), None)
}
| [
[
8,
0,
0.069,
0.133,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1429,
0.0049,
0,
0.66,
0.1,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.1502,
0.0099,
0,
0.66,
... | [
"\"\"\"\nparser.http.characterParser module (imdb package).\n\nThis module provides the classes (and the instances), used to parse\nthe IMDb pages on the akas.imdb.com server about a character.\nE.g., for \"Jesse James\" the referred pages would be:\n main details: http://www.imdb.com/character/ch0000001/\n ... |
"""
parser.http.searchCharacterParser module (imdb package).
This module provides the HTMLSearchCharacterParser class (and the
search_character_parser instance), used to parse the results of a search
for a given character.
E.g., when searching for the name "Jesse James", the parsed page would be:
http://akas.imdb.com/find?s=Characters;mx=20;q=Jesse+James
Copyright 2007-2009 Davide Alberani <da@erlug.linux.it>
2008 H. Turgut Uyar <uyar@tekir.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from imdb.utils import analyze_name, build_name
from utils import Extractor, Attribute, analyze_imdbid
from searchMovieParser import DOMHTMLSearchMovieParser, DOMBasicMovieParser
class DOMBasicCharacterParser(DOMBasicMovieParser):
    """Fetch just the name and the imdbID of a single character.
    Used by DOMHTMLSearchCharacterParser for a direct match: when a
    search on IMDb yields exactly one character, the web server answers
    with the character page itself instead of a result list."""
    def _titleFunct(self, x):
        # empty/None titles are normalized to u'' before the analysis
        return analyze_name(x or u'', canonical=False)
class DOMHTMLSearchCharacterParser(DOMHTMLSearchMovieParser):
    # Customizations of the generic search-result parser for character
    # results.  NOTE(review): the meaning of these class attributes is
    # defined by DOMHTMLSearchMovieParser; confirm there.
    _BaseParser = DOMBasicCharacterParser
    _notDirectHitTitle = '<title>imdb search'
    _titleBuilder = lambda self, x: build_name(x, canonical=False)
    _linkPrefix = '/character/ch'

    # every matching row yields a (characterID, {'name': ...}) pair
    _attrs = [Attribute(key='data',
                        multi=True,
                        path={
                            'link': "./a[1]/@href",
                            'name': "./a[1]/text()"
                        },
                        postprocess=lambda x: (
                            analyze_imdbid(x.get('link') or u''),
                            {'name': x.get('name')}
                        ))]
    extractors = [Extractor(label='search',
                            path="//td[3]/a[starts-with(@href, " \
                                    "'/character/ch')]/..",
                            attrs=_attrs)]
# NOTE(review): _OBJECTS maps parser names to (classes, keyword-arguments)
# pairs; presumably collected by the http package's loader -- confirm there.
_OBJECTS = {
    'search_character_parser': ((DOMHTMLSearchCharacterParser,),
                {'kind': 'character', '_basic_parser': DOMBasicCharacterParser})
}
| [
[
8,
0,
0.1957,
0.3768,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.4058,
0.0145,
0,
0.66,
0.1667,
896,
0,
2,
0,
0,
896,
0,
0
],
[
1,
0,
0.4203,
0.0145,
0,
0.66... | [
"\"\"\"\nparser.http.searchCharacterParser module (imdb package).\n\nThis module provides the HTMLSearchCharacterParser class (and the\nsearch_character_parser instance), used to parse the results of a search\nfor a given character.\nE.g., when searching for the name \"Jesse James\", the parsed page would be:\n ... |
"""
parser package (imdb package).
This package provides various parsers to access IMDb data (e.g.: a
parser for the web/http interface, a parser for the SQL database
interface, etc.).
So far, the http/httpThin, mobile and sql parsers are implemented.
Copyright 2004-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# the data access systems implemented so far (see the docstring above)
__all__ = ['http', 'mobile', 'sql']
| [
[
8,
0,
0.4464,
0.8571,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.9286,
0.0357,
0,
0.66,
1,
272,
0,
0,
0,
0,
0,
5,
0
]
] | [
"\"\"\"\nparser package (imdb package).\n\nThis package provides various parsers to access IMDb data (e.g.: a\nparser for the web/http interface, a parser for the SQL database\ninterface, etc.).\nSo far, the http/httpThin, mobile and sql parsers are implemented.",
"__all__ = ['http', 'mobile', 'sql']"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.