code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Schema processing for discovery based APIs
Schemas holds an APIs discovery schemas. It can return those schema as
deserialized JSON objects, or pretty print them as prototype objects that
conform to the schema.
For example, given the schema:
schema = \"\"\"{
"Foo": {
"type": "object",
"properties": {
"etag": {
"type": "string",
"description": "ETag of the collection."
},
"kind": {
"type": "string",
"description": "Type of the collection ('calendar#acl').",
"default": "calendar#acl"
},
"nextPageToken": {
"type": "string",
"description": "Token used to access the next
page of this result. Omitted if no further results are available."
}
}
}
}\"\"\"
s = Schemas(schema)
print s.prettyPrintByName('Foo')
Produces the following output:
{
"nextPageToken": "A String", # Token used to access the
# next page of this result. Omitted if no further results are available.
"kind": "A String", # Type of the collection ('calendar#acl').
"etag": "A String", # ETag of the collection.
},
The constructor takes a discovery document in which to look up named schema.
"""
# TODO(jcgregorio) support format, enum, minimum, maximum
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import copy
from oauth2client.anyjson import simplejson
class Schemas(object):
  """Schemas for an API.

  Wraps the 'schemas' section of a deserialized discovery document and
  renders named schemas as commented prototype objects.
  """

  def __init__(self, discovery):
    """Constructor.

    Args:
      discovery: object, Deserialized discovery document from which we pull
        out the named schema.
    """
    self.schemas = discovery.get('schemas', {})

    # Cache of pretty printed schemas, keyed by (name, indentation depth).
    # The rendered text depends on the starting indentation: the public
    # prettyPrintByName() renders at dent=1 while $ref resolution through
    # _prettyPrintByName() renders at dent=0, so keying by name alone could
    # return text rendered at the wrong depth.
    self.pretty = {}

  def _prettyPrintByName(self, name, seen=None, dent=0):
    """Get pretty printed object prototype from the schema name.

    Args:
      name: string, Name of schema in the discovery document.
      seen: list of string, Names of schema already seen. Used to handle
        recursive definitions.
      dent: int, Initial indentation depth.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    if seen is None:
      seen = []

    if name in seen:
      # Do not fall into an infinite loop over recursive definitions.
      return '# Object with schema name: %s' % name
    seen.append(name)

    key = (name, dent)
    if key not in self.pretty:
      self.pretty[key] = _SchemaToStruct(self.schemas[name],
          seen, dent).to_str(self._prettyPrintByName)

    seen.pop()

    return self.pretty[key]

  def prettyPrintByName(self, name):
    """Get pretty printed object prototype from the schema name.

    Args:
      name: string, Name of schema in the discovery document.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    # Return with trailing comma and newline removed.
    return self._prettyPrintByName(name, seen=[], dent=1)[:-2]

  def _prettyPrintSchema(self, schema, seen=None, dent=0):
    """Get pretty printed object prototype of schema.

    Args:
      schema: object, Parsed JSON schema.
      seen: list of string, Names of schema already seen. Used to handle
        recursive definitions.
      dent: int, Initial indentation depth.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    if seen is None:
      seen = []

    return _SchemaToStruct(schema, seen, dent).to_str(self._prettyPrintByName)

  def prettyPrintSchema(self, schema):
    """Get pretty printed object prototype of schema.

    Args:
      schema: object, Parsed JSON schema.

    Returns:
      string, A string that contains a prototype object with
        comments that conforms to the given schema.
    """
    # Return with trailing comma and newline removed.
    return self._prettyPrintSchema(schema, dent=1)[:-2]

  def get(self, name):
    """Get deserialized JSON schema from the schema name.

    Args:
      name: string, Schema name.

    Returns:
      object, The deserialized schema.

    Raises:
      KeyError: if no schema with the given name exists.
    """
    return self.schemas[name]
class _SchemaToStruct(object):
  """Convert schema to a prototype object.

  Accumulates output fragments in a list and joins them once at the end;
  emit*/indent/undent are the only mutators of that state.
  """

  def __init__(self, schema, seen, dent=0):
    """Constructor.

    Args:
      schema: object, Parsed JSON schema.
      seen: list, List of names of schema already seen while parsing. Used to
        handle recursive definitions.
      dent: int, Initial indentation depth.
    """
    # The result of this parsing kept as list of strings.
    self.value = []

    # The final value of the parsing.
    self.string = None

    # The parsed JSON schema.
    self.schema = schema

    # Indentation level.
    self.dent = dent

    # Method that when called returns a prototype object for the schema with
    # the given name.
    self.from_cache = None

    # List of names of schema already seen while parsing.
    self.seen = seen

  def emit(self, text):
    """Add text as a line to the output.

    Args:
      text: string, Text to output.
    """
    self.value.extend([" " * self.dent, text, '\n'])

  def emitBegin(self, text):
    """Add text to the output, but with no line terminator.

    Args:
      text: string, Text to output.
    """
    self.value.extend([" " * self.dent, text])

  def emitEnd(self, text, comment):
    """Add text and comment to the output with line terminator.

    Args:
      text: string, Text to output.
      comment: string, Python comment.
    """
    if comment:
      # Continuation lines of a multi-line description are re-emitted as
      # '#'-prefixed lines aligned under the first comment.
      divider = '\n' + ' ' * (self.dent + 2) + '# '
      lines = comment.splitlines()
      lines = [x.rstrip() for x in lines]
      comment = divider.join(lines)
      self.value.extend([text, ' # ', comment, '\n'])
    else:
      self.value.extend([text, '\n'])

  def indent(self):
    """Increase indentation level."""
    self.dent += 1

  def undent(self):
    """Decrease indentation level."""
    self.dent -= 1

  def _to_str_impl(self, schema):
    """Prototype object based on the schema, in Python code with comments.

    Args:
      schema: object, Parsed JSON schema file.

    Returns:
      Prototype object based on the schema, in Python code with comments.
    """
    stype = schema.get('type')
    if stype == 'object':
      self.emitEnd('{', schema.get('description', ''))
      self.indent()
      for pname, pschema in schema.get('properties', {}).iteritems():
        self.emitBegin('"%s": ' % pname)
        self._to_str_impl(pschema)
      self.undent()
      self.emit('},')
    elif '$ref' in schema:
      # Named reference: fetch the (possibly cached) rendering for the
      # referenced schema, then re-indent its lines under the current level.
      schemaName = schema['$ref']
      description = schema.get('description', '')
      s = self.from_cache(schemaName, self.seen)
      parts = s.splitlines()
      self.emitEnd(parts[0], description)
      for line in parts[1:]:
        self.emit(line.rstrip())
    elif stype == 'boolean':
      value = schema.get('default', 'True or False')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'string':
      value = schema.get('default', 'A String')
      self.emitEnd('"%s",' % str(value), schema.get('description', ''))
    elif stype == 'integer':
      value = schema.get('default', '42')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'number':
      value = schema.get('default', '3.14')
      self.emitEnd('%s,' % str(value), schema.get('description', ''))
    elif stype == 'null':
      self.emitEnd('None,', schema.get('description', ''))
    elif stype == 'any':
      self.emitEnd('"",', schema.get('description', ''))
    elif stype == 'array':
      # NOTE(review): unlike the other branches this passes
      # schema.get('description') with no '' default (emitEnd treats a None
      # comment as falsy, so output is unchanged), and schema['items'] will
      # raise KeyError on an array schema without 'items' — confirm intended.
      self.emitEnd('[', schema.get('description'))
      self.indent()
      self.emitBegin('')
      self._to_str_impl(schema['items'])
      self.undent()
      self.emit('],')
    else:
      self.emit('Unknown type! %s' % stype)
      self.emitEnd('', '')

    self.string = ''.join(self.value)
    return self.string

  def to_str(self, from_cache):
    """Prototype object based on the schema, in Python code with comments.

    Args:
      from_cache: callable(name, seen), Callable that retrieves an object
        prototype for a schema with the given name. Seen is a list of schema
        names already seen as we recursively descend the schema definition.

    Returns:
      Prototype object based on the schema, in Python code with comments.
        The lines of the code will all be properly indented.
    """
    self.from_cache = from_cache
    return self._to_str_impl(self.schema)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for discovery based APIs
A client library for Google's discovery based APIs.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
__all__ = [
'build', 'build_from_document'
]
import copy
import httplib2
import logging
import os
import random
import re
import uritemplate
import urllib
import urlparse
import mimeparse
import mimetypes
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
from apiclient.errors import HttpError
from apiclient.errors import InvalidJsonError
from apiclient.errors import MediaUploadSizeError
from apiclient.errors import UnacceptableMimeTypeError
from apiclient.errors import UnknownApiNameOrVersion
from apiclient.errors import UnknownLinkType
from apiclient.http import HttpRequest
from apiclient.http import MediaFileUpload
from apiclient.http import MediaUpload
from apiclient.model import JsonModel
from apiclient.model import RawModel
from apiclient.schema import Schemas
from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from oauth2client.anyjson import simplejson
logger = logging.getLogger(__name__)

# Matches '{name}' placeholders in a discovery method's path template.
URITEMPLATE = re.compile('{[^}]*}')
# Matches the variable names inside such a placeholder.
VARNAME = re.compile('[a-zA-Z0-9_-]+')

# URI Template for a service's discovery document; expanded with
# {api} and {apiVersion} in build().
DISCOVERY_URI = ('https://www.googleapis.com/discovery/v1/apis/'
    '{api}/{apiVersion}/rest')

# Fallback docstring for generated methods whose discovery description
# is missing.
DEFAULT_METHOD_DOC = 'A description of how to use this function'

# Query parameters that work, but don't appear in discovery
STACK_QUERY_PARAMETERS = ['trace', 'fields', 'pp', 'prettyPrint', 'userIp',
    'userip', 'strict']
RESERVED_WORDS = ['and', 'assert', 'break', 'class', 'continue', 'def', 'del',
'elif', 'else', 'except', 'exec', 'finally', 'for', 'from',
'global', 'if', 'import', 'in', 'is', 'lambda', 'not', 'or',
'pass', 'print', 'raise', 'return', 'try', 'while' ]
def _fix_method_name(name):
if name in RESERVED_WORDS:
return name + '_'
else:
return name
def _write_headers(self):
  # Utility no-op method for multipart media handling.
  # NOTE(review): takes 'self' because it is intended to replace a method on
  # a MIME message object so the object emits no headers of its own; the
  # multipart code in this file currently patches with an inline lambda
  # instead — confirm whether this helper is still referenced elsewhere.
  pass
def _add_query_parameter(url, name, value):
"""Adds a query parameter to a url.
Replaces the current value if it already exists in the URL.
Args:
url: string, url to add the query parameter to.
name: string, query parameter name.
value: string, query parameter value.
Returns:
Updated query parameter. Does not update the url if value is None.
"""
if value is None:
return url
else:
parsed = list(urlparse.urlparse(url))
q = dict(parse_qsl(parsed[4]))
q[name] = value
parsed[4] = urllib.urlencode(q)
return urlparse.urlunparse(parsed)
def key2param(key):
  """Convert a wire-format key name into a usable Python parameter name.

  Every character that is not alphanumeric becomes '_', and a leading
  non-alphabetic character is prefixed with 'x'.
  For example, converting "max-results" -> "max_results"
  """
  pieces = [] if key[0].isalpha() else ['x']
  pieces.extend(c if c.isalnum() else '_' for c in key)
  return ''.join(pieces)
def build(serviceName,
          version,
          http=None,
          discoveryServiceUrl=DISCOVERY_URI,
          developerKey=None,
          model=None,
          requestBuilder=HttpRequest):
  """Construct a Resource for interacting with an API.

  Construct a Resource object for interacting with
  an API. The serviceName and version are the
  names from the Discovery service.

  Args:
    serviceName: string, name of the service
    version: string, the version of the service
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    discoveryServiceUrl: string, a URI Template that points to
      the location of the discovery service. It should have two
      parameters {api} and {apiVersion} that when filled in
      produce an absolute URI to the discovery document for
      that service.
    developerKey: string, key obtained
      from https://code.google.com/apis/console
    model: apiclient.Model, converts to and from the wire format
    requestBuilder: apiclient.http.HttpRequest, encapsulator for
      an HTTP request

  Returns:
    A Resource object with methods for interacting with
    the service.
  """
  params = {
      'api': serviceName,
      'apiVersion': version
      }

  if http is None:
    http = httplib2.Http()

  requested_url = uritemplate.expand(discoveryServiceUrl, params)

  # REMOTE_ADDR is defined by the CGI spec [RFC3875] as the environment
  # variable that contains the network address of the client sending the
  # request. If it exists then add that to the request for the discovery
  # document to avoid exceeding the quota on discovery requests.
  if 'REMOTE_ADDR' in os.environ:
    requested_url = _add_query_parameter(requested_url, 'userIp',
                                         os.environ['REMOTE_ADDR'])
  logger.info('URL being requested: %s' % requested_url)

  resp, content = http.request(requested_url)

  if resp.status == 404:
    raise UnknownApiNameOrVersion("name: %s version: %s" % (serviceName,
                                                            version))
  if resp.status >= 400:
    raise HttpError(resp, content, requested_url)

  # Parse only to validate the payload is JSON; build_from_document() below
  # re-parses the raw 'content' string itself, so 'service' is otherwise
  # unused here.
  try:
    service = simplejson.loads(content)
  except ValueError, e:
    logger.error('Failed to parse as JSON: ' + content)
    raise InvalidJsonError()

  # Load the optional legacy "future.json" capabilities file shipped with
  # this package for the service; a missing file is the normal case.
  filename = os.path.join(os.path.dirname(__file__), 'contrib',
                          serviceName, 'future.json')
  try:
    f = file(filename, 'r')
    future = f.read()
    f.close()
  except IOError:
    future = None

  return build_from_document(content, discoveryServiceUrl, future,
                             http, developerKey, model, requestBuilder)
def build_from_document(
    service,
    base,
    future=None,
    http=None,
    developerKey=None,
    model=None,
    requestBuilder=HttpRequest):
  """Create a Resource for interacting with an API.

  Same as `build()`, but constructs the Resource object
  from a discovery document that it is given, as opposed to
  retrieving one over HTTP.

  Args:
    service: string, discovery document
    base: string, base URI for all HTTP requests, usually the discovery URI
    future: string, discovery document with future capabilities
    http: httplib2.Http, An instance of httplib2.Http or something that acts
      like it that HTTP requests will be made through.
    developerKey: string, Key for controlling API usage, generated
      from the API Console.
    model: Model class instance that serializes and
      de-serializes requests and responses.
    requestBuilder: Takes an http request and packages it up to be executed.

  Returns:
    A Resource object with methods for interacting with
    the service.
  """
  service = simplejson.loads(service)
  base = urlparse.urljoin(base, service['basePath'])
  if future:
    future = simplejson.loads(future)
    auth_discovery = future.get('auth', {})
  else:
    future = {}
    auth_discovery = {}
  schema = Schemas(service)

  if model is None:
    # The 'dataWrapper' feature tells the model to wrap/unwrap request and
    # response bodies in a {'data': ...} envelope.
    features = service.get('features', [])
    model = JsonModel('dataWrapper' in features)
  resource = createResource(http, base, model, requestBuilder, developerKey,
                            service, future, schema)

  def auth_method():
    """Discovery information about the authentication the API uses."""
    return auth_discovery

  # Expose the auth information captured above as a callable attribute on the
  # returned resource.
  setattr(resource, 'auth_discovery', auth_method)

  return resource
def _cast(value, schema_type):
"""Convert value to a string based on JSON Schema type.
See http://tools.ietf.org/html/draft-zyp-json-schema-03 for more details on
JSON Schema.
Args:
value: any, the value to convert
schema_type: string, the type that value should be interpreted as
Returns:
A string representation of 'value' based on the schema_type.
"""
if schema_type == 'string':
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
elif schema_type == 'integer':
return str(int(value))
elif schema_type == 'number':
return str(float(value))
elif schema_type == 'boolean':
return str(bool(value)).lower()
else:
if type(value) == type('') or type(value) == type(u''):
return value
else:
return str(value)
MULTIPLIERS = {
"KB": 2 ** 10,
"MB": 2 ** 20,
"GB": 2 ** 30,
"TB": 2 ** 40,
}
def _media_size_to_long(maxSize):
"""Convert a string media size, such as 10GB or 3TB into an integer."""
if len(maxSize) < 2:
return 0
units = maxSize[-2:].upper()
multiplier = MULTIPLIERS.get(units, 0)
if multiplier:
return int(maxSize[:-2]) * multiplier
else:
return int(maxSize)
def createResource(http, baseUrl, model, requestBuilder,
                   developerKey, resourceDesc, futureDesc, schema):
  """Build and return a Resource whose methods are generated from discovery.

  A new Resource class is created per call; methods described in
  resourceDesc are attached to it dynamically via setattr, and nested
  resources recurse back into createResource.

  Args:
    http: httplib2.Http (or compatible), transport for requests.
    baseUrl: string, base URI joined with each method's path.
    model: Model instance, serializes requests and deserializes responses.
    requestBuilder: callable, wraps each call into an HttpRequest.
    developerKey: string or None, appended as the 'key' query parameter.
    resourceDesc: dict, this resource's section of the discovery document.
    futureDesc: dict or None, legacy future.json description.
    schema: Schemas, named-schema lookup for docstring generation.

  Returns:
    An instance of the dynamically built Resource class.
  """

  class Resource(object):
    """A class for interacting with a resource."""

    def __init__(self):
      self._http = http
      self._baseUrl = baseUrl
      self._model = model
      self._developerKey = developerKey
      self._requestBuilder = requestBuilder

  def createMethod(theclass, methodName, methodDesc, futureDesc):
    """Attach an API method built from methodDesc to theclass."""
    methodName = _fix_method_name(methodName)
    pathUrl = methodDesc['path']
    httpMethod = methodDesc['httpMethod']
    methodId = methodDesc['id']

    mediaPathUrl = None
    accept = []
    maxSize = 0
    if 'mediaUpload' in methodDesc:
      mediaUpload = methodDesc['mediaUpload']
      # TODO(jcgregorio) Use URLs from discovery once it is updated.
      parsed = list(urlparse.urlparse(baseUrl))
      basePath = parsed[2]
      mediaPathUrl = '/upload' + basePath + pathUrl
      accept = mediaUpload['accept']
      maxSize = _media_size_to_long(mediaUpload.get('maxSize', ''))

    if 'parameters' not in methodDesc:
      methodDesc['parameters'] = {}
    # Accept the stack query parameters on every method even though
    # discovery does not list them.
    for name in STACK_QUERY_PARAMETERS:
      methodDesc['parameters'][name] = {
          'type': 'string',
          'location': 'query'
          }

    if httpMethod in ['PUT', 'POST', 'PATCH'] and 'request' in methodDesc:
      methodDesc['parameters']['body'] = {
          'description': 'The request body.',
          'type': 'object',
          'required': True,
          }
      # NOTE(review): the else branch below is unreachable — the enclosing
      # condition already requires 'request' in methodDesc.
      if 'request' in methodDesc:
        methodDesc['parameters']['body'].update(methodDesc['request'])
      else:
        methodDesc['parameters']['body']['type'] = 'object'
    if 'mediaUpload' in methodDesc:
      methodDesc['parameters']['media_body'] = {
          'description': 'The filename of the media request body.',
          'type': 'string',
          'required': False,
          }
      if 'body' in methodDesc['parameters']:
        # A body is optional when media can be uploaded instead.
        methodDesc['parameters']['body']['required'] = False

    argmap = {} # Map from method parameter name to query parameter name
    required_params = [] # Required parameters
    repeated_params = [] # Repeated parameters
    pattern_params = {} # Parameters that must match a regex
    query_params = [] # Parameters that will be used in the query string
    path_params = {} # Parameters that will be used in the base URL
    param_type = {} # The type of the parameter
    enum_params = {} # Allowable enumeration values for each parameter

    if 'parameters' in methodDesc:
      for arg, desc in methodDesc['parameters'].iteritems():
        param = key2param(arg)
        argmap[param] = arg

        if desc.get('pattern', ''):
          pattern_params[param] = desc['pattern']
        if desc.get('enum', ''):
          enum_params[param] = desc['enum']
        if desc.get('required', False):
          required_params.append(param)
        if desc.get('repeated', False):
          repeated_params.append(param)
        if desc.get('location') == 'query':
          query_params.append(param)
        if desc.get('location') == 'path':
          path_params[param] = param
        param_type[param] = desc.get('type', 'string')

    # Any variable mentioned in the path template is a path parameter, even
    # if discovery marked it as a query parameter.
    for match in URITEMPLATE.finditer(pathUrl):
      for namematch in VARNAME.finditer(match.group(0)):
        name = key2param(namematch.group(0))
        path_params[name] = name
        if name in query_params:
          query_params.remove(name)

    def method(self, **kwargs):
      # Reject unknown keyword arguments and check required ones.
      for name in kwargs.iterkeys():
        if name not in argmap:
          raise TypeError('Got an unexpected keyword argument "%s"' % name)

      for name in required_params:
        if name not in kwargs:
          raise TypeError('Missing required parameter "%s"' % name)

      # Validate values against any declared regex patterns; repeated
      # parameters are validated element by element.
      for name, regex in pattern_params.iteritems():
        if name in kwargs:
          if isinstance(kwargs[name], basestring):
            pvalues = [kwargs[name]]
          else:
            pvalues = kwargs[name]
          for pvalue in pvalues:
            if re.match(regex, pvalue) is None:
              raise TypeError(
                  'Parameter "%s" value "%s" does not match the pattern "%s"' %
                  (name, pvalue, regex))

      for name, enums in enum_params.iteritems():
        if name in kwargs:
          # We need to handle the case of a repeated enum
          # name differently, since we want to handle both
          # arg='value' and arg=['value1', 'value2']
          if (name in repeated_params and
              not isinstance(kwargs[name], basestring)):
            values = kwargs[name]
          else:
            values = [kwargs[name]]
          for value in values:
            if value not in enums:
              raise TypeError(
                  'Parameter "%s" value "%s" is not an allowed value in "%s"' %
                  (name, value, str(enums)))

      # Partition cast values into path and query parameters.
      actual_query_params = {}
      actual_path_params = {}
      for key, value in kwargs.iteritems():
        to_type = param_type.get(key, 'string')
        # For repeated parameters we cast each member of the list.
        if key in repeated_params and type(value) == type([]):
          cast_value = [_cast(x, to_type) for x in value]
        else:
          cast_value = _cast(value, to_type)
        if key in query_params:
          actual_query_params[argmap[key]] = cast_value
        if key in path_params:
          actual_path_params[argmap[key]] = cast_value
      body_value = kwargs.get('body', None)
      media_filename = kwargs.get('media_body', None)

      if self._developerKey:
        actual_query_params['key'] = self._developerKey

      model = self._model
      # If there is no schema for the response then presume a binary blob.
      if 'response' not in methodDesc:
        model = RawModel()

      headers = {}
      headers, params, query, body = model.request(headers,
          actual_path_params, actual_query_params, body_value)

      expanded_url = uritemplate.expand(pathUrl, params)
      url = urlparse.urljoin(self._baseUrl, expanded_url + query)

      resumable = None
      multipart_boundary = ''

      if media_filename:
        # Ensure we end up with a valid MediaUpload object.
        if isinstance(media_filename, basestring):
          (media_mime_type, encoding) = mimetypes.guess_type(media_filename)
          if media_mime_type is None:
            raise UnknownFileType(media_filename)
          if not mimeparse.best_match([media_mime_type], ','.join(accept)):
            raise UnacceptableMimeTypeError(media_mime_type)
          media_upload = MediaFileUpload(media_filename, media_mime_type)
        elif isinstance(media_filename, MediaUpload):
          media_upload = media_filename
        else:
          raise TypeError('media_filename must be str or MediaUpload.')

        # Check the maxSize
        if maxSize > 0 and media_upload.size() > maxSize:
          raise MediaUploadSizeError("Media larger than: %s" % maxSize)

        # Use the media path uri for media uploads
        expanded_url = uritemplate.expand(mediaPathUrl, params)
        url = urlparse.urljoin(self._baseUrl, expanded_url + query)
        if media_upload.resumable():
          url = _add_query_parameter(url, 'uploadType', 'resumable')

        if media_upload.resumable():
          # This is all we need to do for resumable, if the body exists it gets
          # sent in the first request, otherwise an empty body is sent.
          resumable = media_upload
        else:
          # A non-resumable upload
          if body is None:
            # This is a simple media upload
            headers['content-type'] = media_upload.mimetype()
            body = media_upload.getbytes(0, media_upload.size())
            url = _add_query_parameter(url, 'uploadType', 'media')
          else:
            # This is a multipart/related upload.
            msgRoot = MIMEMultipart('related')
            # msgRoot should not write out its own headers
            setattr(msgRoot, '_write_headers', lambda self: None)

            # attach the body as one part
            msg = MIMENonMultipart(*headers['content-type'].split('/'))
            msg.set_payload(body)
            msgRoot.attach(msg)

            # attach the media as the second part
            msg = MIMENonMultipart(*media_upload.mimetype().split('/'))
            msg['Content-Transfer-Encoding'] = 'binary'
            payload = media_upload.getbytes(0, media_upload.size())
            msg.set_payload(payload)
            msgRoot.attach(msg)

            body = msgRoot.as_string()

            multipart_boundary = msgRoot.get_boundary()
            headers['content-type'] = ('multipart/related; '
                                       'boundary="%s"') % multipart_boundary
            url = _add_query_parameter(url, 'uploadType', 'multipart')

      logger.info('URL being requested: %s' % url)
      return self._requestBuilder(self._http,
                                  model.response,
                                  url,
                                  method=httpMethod,
                                  body=body,
                                  headers=headers,
                                  methodId=methodId,
                                  resumable=resumable)

    # Build the generated method's docstring from the discovery description,
    # the per-parameter docs, and pretty-printed schema prototypes.
    docs = [methodDesc.get('description', DEFAULT_METHOD_DOC), '\n\n']
    if len(argmap) > 0:
      docs.append('Args:\n')
    for arg in argmap.iterkeys():
      if arg in STACK_QUERY_PARAMETERS:
        continue
      repeated = ''
      if arg in repeated_params:
        repeated = ' (repeated)'
      required = ''
      if arg in required_params:
        required = ' (required)'
      paramdesc = methodDesc['parameters'][argmap[arg]]
      paramdoc = paramdesc.get('description', 'A parameter')
      if '$ref' in paramdesc:
        docs.append(
            (' %s: object, %s%s%s\n The object takes the'
             ' form of:\n\n%s\n\n') % (arg, paramdoc, required, repeated,
                 schema.prettyPrintByName(paramdesc['$ref'])))
      else:
        paramtype = paramdesc.get('type', 'string')
        docs.append(' %s: %s, %s%s%s\n' % (arg, paramtype, paramdoc, required,
                                           repeated))
      enum = paramdesc.get('enum', [])
      enumDesc = paramdesc.get('enumDescriptions', [])
      if enum and enumDesc:
        docs.append(' Allowed values\n')
        for (name, desc) in zip(enum, enumDesc):
          docs.append(' %s - %s\n' % (name, desc))
    if 'response' in methodDesc:
      docs.append('\nReturns:\n An object of the form\n\n ')
      docs.append(schema.prettyPrintSchema(methodDesc['response']))

    setattr(method, '__doc__', ''.join(docs))
    setattr(theclass, methodName, method)

  def createNextMethodFromFuture(theclass, methodName, methodDesc, futureDesc):
    """ This is a legacy method, as only Buzz and Moderator use the future.json
    functionality for generating _next methods. It will be kept around as long
    as those API versions are around, but no new APIs should depend upon it.
    """
    methodName = _fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous):
      """Retrieve the next page of results.

      Takes a single argument, 'body', which is the results
      from the last call, and returns the next set of items
      in the collection.

      Returns:
        None if there are no more items in the collection.
      """
      if futureDesc['type'] != 'uri':
        raise UnknownLinkType(futureDesc['type'])

      # Walk the 'location' key path into the previous response to find the
      # next-page URI; any missing key means there are no more pages.
      try:
        p = previous
        for key in futureDesc['location']:
          p = p[key]
        url = p
      except (KeyError, TypeError):
        return None

      url = _add_query_parameter(url, 'key', self._developerKey)

      headers = {}
      headers, params, query, body = self._model.request(headers, {}, {}, None)

      logger.info('URL being requested: %s' % url)
      # NOTE(review): this request is made but its resp/content are discarded;
      # the method then returns a request builder for the same URL, so the
      # page appears to be fetched twice — confirm whether the direct
      # self._http.request call can be removed.
      resp, content = self._http.request(url, method='GET', headers=headers)

      return self._requestBuilder(self._http,
                                  self._model.response,
                                  url,
                                  method='GET',
                                  headers=headers,
                                  methodId=methodId)

    setattr(theclass, methodName, methodNext)

  def createNextMethod(theclass, methodName, methodDesc, futureDesc):
    """Attach a pageToken-based <name>_next method to theclass.

    NOTE(review): futureDesc is never used here, and the call site below
    passes methodName as this argument — confirm the parameter is vestigial.
    """
    methodName = _fix_method_name(methodName)
    methodId = methodDesc['id'] + '.next'

    def methodNext(self, previous_request, previous_response):
      """Retrieves the next page of results.

      Args:
        previous_request: The request for the previous page.
        previous_response: The response from the request for the previous page.

      Returns:
        A request object that you can call 'execute()' on to request the next
        page. Returns None if there are no more items in the collection.
      """
      # Retrieve nextPageToken from previous_response
      # Use as pageToken in previous_request to create new request.

      if 'nextPageToken' not in previous_response:
        return None

      request = copy.copy(previous_request)

      pageToken = previous_response['nextPageToken']
      parsed = list(urlparse.urlparse(request.uri))
      q = parse_qsl(parsed[4])

      # Find and remove old 'pageToken' value from URI
      newq = [(key, value) for (key, value) in q if key != 'pageToken']
      newq.append(('pageToken', pageToken))
      parsed[4] = urllib.urlencode(newq)
      uri = urlparse.urlunparse(parsed)

      request.uri = uri

      logger.info('URL being requested: %s' % uri)

      return request

    setattr(theclass, methodName, methodNext)

  # Add basic methods to Resource
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if futureDesc:
        future = futureDesc['methods'].get(methodName, {})
      else:
        future = None
      createMethod(Resource, methodName, methodDesc, future)

  # Add in nested resources
  if 'resources' in resourceDesc:

    def createResourceMethod(theclass, methodName, methodDesc, futureDesc):
      """Attach an accessor that lazily builds the nested resource."""
      methodName = _fix_method_name(methodName)

      def methodResource(self):
        return createResource(self._http, self._baseUrl, self._model,
                              self._requestBuilder, self._developerKey,
                              methodDesc, futureDesc, schema)

      setattr(methodResource, '__doc__', 'A collection resource.')
      setattr(methodResource, '__is_resource__', True)
      setattr(theclass, methodName, methodResource)

    for methodName, methodDesc in resourceDesc['resources'].iteritems():
      if futureDesc and 'resources' in futureDesc:
        future = futureDesc['resources'].get(methodName, {})
      else:
        future = {}
      createResourceMethod(Resource, methodName, methodDesc, future)

  # Add <m>_next() methods to Resource
  if futureDesc and 'methods' in futureDesc:
    for methodName, methodDesc in futureDesc['methods'].iteritems():
      if 'next' in methodDesc and methodName in resourceDesc['methods']:
        createNextMethodFromFuture(Resource, methodName + '_next',
                                   resourceDesc['methods'][methodName],
                                   methodDesc['next'])

  # Add _next() methods
  # Look for response bodies in schema that contain nextPageToken, and methods
  # that take a pageToken parameter.
  if 'methods' in resourceDesc:
    for methodName, methodDesc in resourceDesc['methods'].iteritems():
      if 'response' in methodDesc:
        responseSchema = methodDesc['response']
        if '$ref' in responseSchema:
          responseSchema = schema.get(responseSchema['$ref'])
        hasNextPageToken = 'nextPageToken' in responseSchema.get('properties',
                                                                 {})
        hasPageToken = 'pageToken' in methodDesc.get('parameters', {})
        if hasNextPageToken and hasPageToken:
          createNextMethod(Resource, methodName + '_next',
                           resourceDesc['methods'][methodName],
                           methodName)

  return Resource()
| Python |
# Package version string.
__version__ = "1.0b9"
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model objects for requests and responses.
Each API may support one or more serializations, such
as JSON, Atom, etc. The model classes are responsible
for converting between the wire format and the Python
object representation.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import logging
import urllib
from errors import HttpError
from oauth2client.anyjson import simplejson
# Handle to the gflags flag registry; the flag defined below is read by
# BaseModel._log_request at request time.
FLAGS = gflags.FLAGS

# Off by default: when enabled, full request headers, parameters, and bodies
# are written to the log.
gflags.DEFINE_boolean('dump_request_response', False,
                      'Dump all http server requests and responses. '
                      )
def _abstract():
raise NotImplementedError('You need to override this function')
class Model(object):
  """Model base class.

  All Model classes should implement this interface.
  The Model serializes and de-serializes between a wire
  format such as JSON and a Python object representation.
  """

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
        serializable.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized in the desired wire format.
    """
    # Subclasses must override; _abstract() raises NotImplementedError.
    _abstract()

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    # Subclasses must override; _abstract() raises NotImplementedError.
    _abstract()
class BaseModel(Model):
  """Base model class.

  Subclasses should provide implementations for the "serialize" and
  "deserialize" methods, as well as values for the following class attributes.

  Attributes:
    accept: The value to use for the HTTP Accept header.
    content_type: The value to use for the HTTP Content-type header.
    no_content_response: The value to return when deserializing a 204 "No
        Content" response.
    alt_param: The value to supply as the "alt" query parameter for requests.
  """

  accept = None
  content_type = None
  no_content_response = None
  alt_param = None

  def _log_request(self, headers, path_params, query, body):
    """Logs debugging information about the request if requested."""
    if FLAGS.dump_request_response:
      logging.info('--request-start--')
      logging.info('-headers-start-')
      for h, v in headers.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-headers-end-')
      logging.info('-path-parameters-start-')
      for h, v in path_params.iteritems():
        logging.info('%s: %s', h, v)
      logging.info('-path-parameters-end-')
      logging.info('body: %s', body)
      logging.info('query: %s', query)
      logging.info('--request-end--')

  def request(self, headers, path_params, query_params, body_value):
    """Updates outgoing requests with a serialized body.

    Args:
      headers: dict, request headers
      path_params: dict, parameters that appear in the request path
      query_params: dict, parameters that appear in the query
      body_value: object, the request body as a Python object, which must be
          serializable.

    Returns:
      A tuple of (headers, path_params, query, body)
        headers: dict, request headers
        path_params: dict, parameters that appear in the request path
        query: string, query part of the request URI
        body: string, the body serialized in the desired wire format.
    """
    query = self._build_query(query_params)
    headers['accept'] = self.accept
    headers['accept-encoding'] = 'gzip, deflate'
    # Append our identifier to any user-agent the caller already set.
    if 'user-agent' in headers:
      headers['user-agent'] += ' '
    else:
      headers['user-agent'] = ''
    headers['user-agent'] += 'google-api-python-client/1.0'

    if body_value is not None:
      headers['content-type'] = self.content_type
      body_value = self.serialize(body_value)
    self._log_request(headers, path_params, query, body_value)
    return (headers, path_params, query, body_value)

  def _build_query(self, params):
    """Builds a query string.

    Args:
      params: dict, the query parameters

    Returns:
      The query parameters properly encoded into an HTTP URI query string.
    """
    if self.alt_param is not None:
      params.update({'alt': self.alt_param})
    astuples = []
    for key, value in params.iteritems():
      if isinstance(value, list):
        # Repeated parameters: emit one (key, value) pair per list element.
        # NOTE(review): assumes list elements are strings — TODO confirm.
        for x in value:
          x = x.encode('utf-8')
          astuples.append((key, x))
      else:
        # Encode text values to UTF-8 bytes; leave other types (ints,
        # booleans) for urlencode to stringify.
        if callable(getattr(value, 'encode', None)):
          value = value.encode('utf-8')
        astuples.append((key, value))
    return '?' + urllib.urlencode(astuples)

  def _log_response(self, resp, content):
    """Logs debugging information about the response if requested."""
    if FLAGS.dump_request_response:
      logging.info('--response-start--')
      for h, v in resp.iteritems():
        logging.info('%s: %s', h, v)
      if content:
        logging.info(content)
      logging.info('--response-end--')

  def response(self, resp, content):
    """Convert the response wire format into a Python object.

    Args:
      resp: httplib2.Response, the HTTP response headers and status
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.

    Raises:
      apiclient.errors.HttpError if a non 2xx response is received.
    """
    self._log_response(resp, content)
    # Error handling is TBD, for example, do we retry
    # for some operation/error combinations?
    if resp.status < 300:
      if resp.status == 204:
        # A 204: No Content response should be treated differently
        # to all the other success states
        return self.no_content_response
      return self.deserialize(content)
    else:
      logging.debug('Content from bad request was: %s', content)
      raise HttpError(resp, content)

  def serialize(self, body_value):
    """Perform the actual Python object serialization.

    Args:
      body_value: object, the request body as a Python object.

    Returns:
      string, the body in serialized form.
    """
    _abstract()

  def deserialize(self, content):
    """Perform the actual deserialization from response string to Python
    object.

    Args:
      content: string, the body of the HTTP response

    Returns:
      The body de-serialized as a Python object.
    """
    _abstract()
class JsonModel(BaseModel):
  """Model class for JSON.

  Converts HTTP request and response bodies between JSON text and their
  Python object representation.
  """
  accept = 'application/json'
  content_type = 'application/json'
  alt_param = 'json'

  def __init__(self, data_wrapper=False):
    """Construct a JsonModel.

    Args:
      data_wrapper: boolean, wrap requests and responses in a data wrapper
    """
    self._data_wrapper = data_wrapper

  def serialize(self, body_value):
    # When the data wrapper is enabled, nest dict bodies that are not
    # already wrapped under a top-level 'data' key.
    needs_wrapping = (self._data_wrapper and isinstance(body_value, dict)
                      and 'data' not in body_value)
    if needs_wrapping:
      body_value = {'data': body_value}
    return simplejson.dumps(body_value)

  def deserialize(self, content):
    body = simplejson.loads(content)
    # Unwrap a top-level 'data' envelope if the server supplied one.
    if isinstance(body, dict) and 'data' in body:
      return body['data']
    return body

  @property
  def no_content_response(self):
    # An empty object stands in for a 204 "No Content" reply.
    return {}
class RawModel(JsonModel):
  """Model for methods whose responses are not JSON.

  Requests are still serialized as JSON, but the response body is handed
  back to the caller untouched.
  """
  content_type = 'application/json'
  accept = '*/*'
  alt_param = None

  def deserialize(self, content):
    # No parsing: the caller receives the exact response body.
    return content

  @property
  def no_content_response(self):
    # An empty string stands in for a 204 "No Content" reply.
    return ''
class ProtocolBufferModel(BaseModel):
  """Model class for protocol buffers.

  Sends and receives binary protocol buffer bodies; responses are parsed
  with the protocol buffer class supplied to the constructor.
  """
  accept = 'application/x-protobuf'
  content_type = 'application/x-protobuf'
  alt_param = 'proto'

  def __init__(self, protocol_buffer):
    """Constructs a ProtocolBufferModel.

    The serialized protocol buffer returned in an HTTP response will be
    de-serialized using the given protocol buffer class.

    Args:
      protocol_buffer: The protocol buffer class used to de-serialize a
          response from the API.
    """
    self._protocol_buffer = protocol_buffer

  def serialize(self, body_value):
    return body_value.SerializeToString()

  def deserialize(self, content):
    return self._protocol_buffer.FromString(content)

  @property
  def no_content_response(self):
    # An empty message of the configured type stands in for "no content".
    return self._protocol_buffer()
def makepatch(original, modified):
  """Create a patch object.

  Some methods support PATCH, an efficient way to send updates to a resource.
  This method allows the easy construction of patch bodies by looking at the
  differences between a resource before and after it was modified.

  Args:
    original: object, the original deserialized resource
    modified: object, the modified deserialized resource

  Returns:
    An object that contains only the changes from original to modified, in a
    form suitable to pass to a PATCH method.

  Example usage:
    item = service.activities().get(postid=postid, userid=userid).execute()
    original = copy.deepcopy(item)
    item['object']['content'] = 'This is updated.'
    service.activities.patch(postid=postid, userid=userid,
      body=makepatch(original, item)).execute()
  """
  patch = {}
  for key, original_value in original.items():
    modified_value = modified.get(key, None)
    if modified_value is None:
      # Use None to signal that the element is deleted
      patch[key] = None
    elif original_value != modified_value:
      # Only recurse when BOTH sides are dicts; the original recursed
      # whenever original_value was a dict, crashing when a dict value was
      # replaced by a non-dict (e.g. a string).
      if isinstance(original_value, dict) and isinstance(modified_value, dict):
        # Recursively descend objects
        patch[key] = makepatch(original_value, modified_value)
      else:
        # In the case of simple types, arrays, or changed types we just
        # replace the value outright.
        patch[key] = modified_value
    # Unchanged values are omitted from the patch.
  # Keys that only exist in the modified resource are additions.
  for key in modified:
    if key not in original:
      patch[key] = modified[key]
  return patch
| Python |
#!/usr/bin/python2.4
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from oauth2client.anyjson import simplejson
class Error(Exception):
  """Root of the exception hierarchy for this module."""
  pass
class HttpError(Error):
  """HTTP data was invalid or unexpected."""

  def __init__(self, resp, content, uri=None):
    """Store the failing response, its body, and the URI requested (if any)."""
    self.resp = resp
    self.content = content
    self.uri = uri

  def _get_reason(self):
    """Calculate the reason for the error from the response content."""
    if not self.resp.get('content-type', '').startswith('application/json'):
      return self.resp.reason
    # JSON error bodies carry the message at error.message; fall back to the
    # raw body when the JSON is malformed or shaped differently.
    try:
      payload = simplejson.loads(self.content)
      return payload['error']['message']
    except (ValueError, KeyError):
      return self.content

  def __repr__(self):
    if self.uri:
      return '<HttpError %s when requesting %s returned "%s">' % (
          self.resp.status, self.uri, self._get_reason())
    return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())

  __str__ = __repr__
class InvalidJsonError(Error):
  """Raised when a response body that should be JSON could not be parsed."""
  pass
class UnknownLinkType(Error):
  """Raised when a link type is unknown or unexpected."""
  pass
class UnknownApiNameOrVersion(Error):
  """Raised when no API with the requested name and version exists."""
  pass
class UnacceptableMimeTypeError(Error):
  """Raised when a mimetype is not acceptable for the requested operation."""
  pass
class MediaUploadSizeError(Error):
  """Raised when the media to upload is larger than the method can accept."""
  pass
class ResumableUploadError(Error):
  """Raised when an error occurred during a resumable upload."""
  pass
class BatchError(HttpError):
  """Error occurred during batch operations.

  Unlike HttpError, resp and content may be None when the failure happened
  before any individual HTTP response was received.
  """

  def __init__(self, reason, resp=None, content=None):
    """Constructor for a BatchError.

    Args:
      reason: string, short description of the failure.
      resp: httplib2.Response, the HTTP response headers and status, if any.
      content: string, the body of the HTTP response, if any.
    """
    self.resp = resp
    self.content = content
    self.reason = reason

  def __repr__(self):
    # self.resp defaults to None; guard so repr() itself cannot raise
    # AttributeError (the original unconditionally read self.resp.status).
    if getattr(self.resp, 'status', None) is None:
      return '<BatchError "%s">' % self.reason
    return '<BatchError %s "%s">' % (self.resp.status, self.reason)

  __str__ = __repr__
class UnexpectedMethodError(Error):
  """Exception raised by RequestMockBuilder on unexpected calls."""

  def __init__(self, methodId=None):
    """Constructor for an UnexpectedMethodError."""
    message = 'Received unexpected call %s' % methodId
    super(UnexpectedMethodError, self).__init__(message)
class UnexpectedBodyError(Error):
  """Exception raised by RequestMockBuilder on unexpected bodies."""

  def __init__(self, expected, provided):
    """Constructor for an UnexpectedBodyError."""
    super(UnexpectedBodyError, self).__init__(
        'Expected: [%s] - Provided: [%s]' % (expected, provided))
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Copy files from source to dest expanding symlinks along the way.
"""
from shutil import copytree
import gflags
import sys
FLAGS = gflags.FLAGS
# Ignore these files and directories when copying over files into the snapshot.
IGNORE = set(['.hg', 'httplib2', 'oauth2', 'simplejson', 'static', 'gflags.py',
'gflags_validators.py'])
# In addition to the above files also ignore these files and directories when
# copying over samples into the snapshot.
IGNORE_IN_SAMPLES = set(['apiclient', 'oauth2client', 'uritemplate'])
gflags.DEFINE_string('source', '.', 'Directory name to copy from.')
gflags.DEFINE_string('dest', 'snapshot', 'Directory name to copy to.')
def _ignore(path, names):
  """Return the subset of names that copytree should skip for this path.

  Everything in IGNORE is always skipped; sample subdirectories (any path
  other than '.') additionally skip the packages in IGNORE_IN_SAMPLES.
  """
  skipped = IGNORE.intersection(names)
  if path != '.':
    skipped = skipped | IGNORE_IN_SAMPLES.intersection(names)
  return skipped
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
copytree(FLAGS.source, FLAGS.dest, symlinks=True,
ignore=_ignore)
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for Blogger.
Command-line application that retrieves the users blogs and posts.
Usage:
$ python blogger.py
You can also get help on all the command-line flags the program understands
by running:
$ python blogger.py --help
To get detailed log output run:
$ python blogger.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
import os
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/blogger',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('blogger.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("blogger", "v2", http=http)
try:
users = service.users()
# Retrieve this user's profile information
thisuser = users.get(userId="self").execute(http)
print "This user's display name is: %s" % thisuser['displayName']
# Retrieve the list of Blogs this user has write privileges on
thisusersblogs = users.blogs().list(userId="self").execute()
for blog in thisusersblogs['items']:
print "The blog named \"%s\" is at: %s" % (blog['name'], blog['url'])
posts = service.posts()
# List the posts for each blog this user has
for blog in thisusersblogs['items']:
print "The posts for %s:" % blog['name']
request = posts.list(blogId=blog['id'])
while request != None:
posts_doc = request.execute(http)
if 'items' in posts_doc and not (posts_doc['items'] is None):
for post in posts_doc['items']:
print " %s (%s)" % (post['title'], post['url'])
request = posts.list_next(request, posts_doc)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample for threading and queues.
A simple sample that processes many requests by constructing a threadpool and
passing client requests by a thread queue to be processed.
"""
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
import Queue
import gflags
import httplib2
import logging
import sys
import threading
import time
# How many threads to start.
NUM_THREADS = 3
# A list of URLs to shorten.
BULK = [
"https://code.google.com/apis/moderator/",
"https://code.google.com/apis/latitude/",
"https://code.google.com/apis/urlshortener/",
"https://code.google.com/apis/customsearch/",
"https://code.google.com/apis/shopping/search/",
"https://code.google.com/apis/predict",
"https://code.google.com/more",
]
FLAGS = gflags.FLAGS
FLOW = OAuth2WebServerFlow(
client_id='433807057907.apps.googleusercontent.com',
client_secret='jigtZpMApkRxncxikFpR+SFg',
scope='https://www.googleapis.com/auth/urlshortener',
user_agent='urlshortener-cmdline-sample/1.0')
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
queue = Queue.Queue()
class Backoff:
  """Exponential backoff helper.

  Use loop() as the condition of a retry loop and call fail() after each
  failed request: fail() sleeps for an exponentially growing delay, and
  loop() returns False once maxretries failures have been recorded.
  """

  def __init__(self, maxretries=8):
    """Create a Backoff allowing at most maxretries failed attempts."""
    self.retry = 0
    self.maxretries = maxretries
    self.first = True

  def loop(self):
    """Return True while another attempt is still permitted."""
    if not self.first:
      return self.retry < self.maxretries
    # The very first pass through the loop is always allowed.
    self.first = False
    return True

  def fail(self):
    """Record a failure and sleep for 2**retries seconds."""
    self.retry += 1
    time.sleep(2 ** self.retry)
def start_threads(credentials):
  """Create the thread pool to process the requests.

  Starts NUM_THREADS daemon threads, each pulling requests from the shared
  module-level queue and executing them with exponential backoff on
  retryable HTTP errors.

  Args:
    credentials: OAuth 2.0 credentials used to authorize each worker's
        Http instance.
  """

  def process_requests(n):
    # Each worker builds its own authorized Http object; instances are not
    # shared between threads.
    http = httplib2.Http()
    http = credentials.authorize(http)
    loop = True
    while loop:
      # Blocks until a request is available on the shared queue.
      request = queue.get()
      backoff = Backoff()
      while backoff.loop():
        try:
          response = request.execute(http)
          print "Processed: %s in thread %d" % (response['id'], n)
          break
        except HttpError, e:
          # Back off and retry; print a notice for the status codes the
          # sample treats as rate-limit/transient errors.
          if e.resp.status in [402, 403, 408, 503, 504]:
            print "Increasing backoff, got status code: %d" % e.resp.status
          backoff.fail()
        except Exception, e:
          # Any other failure stops this worker after finishing the item.
          print "Unexpected error. Exiting." + str(e)
          loop = False
          break
      print "Completed request"
      # Mark the item done so queue.join() in main() can return.
      queue.task_done()

  for i in range(NUM_THREADS):
    t = threading.Thread(target=process_requests, args=[i])
    # Daemon threads let the process exit once main() finishes.
    t.daemon = True
    t.start()
def main(argv):
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
storage = Storage('threadqueue.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run(FLOW, storage)
start_threads(credentials)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("urlshortener", "v1", http=http,
developerKey="AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0")
shortener = service.url()
for url in BULK:
body = {"longUrl": url }
shorten_request = shortener.insert(body=body)
print "Adding request to queue"
queue.put(shorten_request)
# Wait for all the requests to finish
queue.join()
if __name__ == "__main__":
main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Starting template for Google App Engine applications.
Use this project as a starting point if you are just beginning to build a Google
App Engine project. Remember to download the OAuth 2.0 client secrets which can
be obtained from the Developer Console <https://code.google.com/apis/console/>
and save them as 'client_secrets.json' in the project directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from oauth2client.appengine import oauth2decorator_from_clientsecrets
from oauth2client.client import AccessTokenRefreshError
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
<h1>Warning: Please configure OAuth 2.0</h1>
<p>
To make this sample run you will need to populate the client_secrets.json file
found at:
</p>
<p>
<code>%s</code>.
</p>
<p>with information found on the <a
href="https://code.google.com/apis/console">APIs Console</a>.
</p>
""" % CLIENT_SECRETS
http = httplib2.Http(memcache)
service = build("plus", "v1", http=http)
decorator = oauth2decorator_from_clientsecrets(
CLIENT_SECRETS,
'https://www.googleapis.com/auth/plus.me',
MISSING_CLIENT_SECRETS_MESSAGE)
class MainHandler(webapp.RequestHandler):
  """Renders the grant page, offering an authorization link when needed."""

  @decorator.oauth_aware
  def get(self):
    template_path = os.path.join(os.path.dirname(__file__), 'grant.html')
    context = {
        'url': decorator.authorize_url(),
        'has_credentials': decorator.has_credentials(),
    }
    self.response.out.write(template.render(template_path, context))
class AboutHandler(webapp.RequestHandler):
  """Fetches the signed-in user's profile and renders a welcome page."""

  @decorator.oauth_required
  def get(self):
    try:
      authorized_http = decorator.http()
      profile = service.people().get(userId='me').execute(authorized_http)
      greeting = 'Hello, %s!' % profile['displayName']
      template_path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      self.response.out.write(
          template.render(template_path, {'text': greeting}))
    except AccessTokenRefreshError:
      # Stored credentials went stale; send the user back through the flow.
      self.redirect('/')
def main():
  """Build the WSGI application and hand it to the App Engine runtime."""
  routes = [
      ('/', MainHandler),
      ('/about', AboutHandler),
  ]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sample for the Group Settings API demonstrates get and update method.
Usage:
$ python groupsettings.py
You can also get help on all the command-line flags the program understands
by running:
$ python groupsettings.py --help
"""
__author__ = 'Shraddha Gupta <shraddhag@google.com>'
from optparse import OptionParser
import os
import pprint
import sys
from apiclient.discovery import build
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
def access_settings(service, groupId, settings):
"""Retrieves a group's settings and updates the access permissions to it.
Args:
service: object service for the Group Settings API.
groupId: string identifier of the group@domain.
settings: dictionary key-value pairs of properties of group.
"""
# Get the resource 'group' from the set of resources of the API.
# The Group Settings API has only one resource 'group'.
group = service.groups()
# Retrieve the group properties
g = group.get(groupUniqueId=groupId).execute()
print '\nGroup properties for group %s\n' % g['name']
pprint.pprint(g)
# If dictionary is empty, return without updating the properties.
if not settings.keys():
print '\nGive access parameters to update group access permissions\n'
return
body = {}
# Settings might contain null value for some keys(properties).
# Extract the properties with values and add to dictionary body.
for key in settings.iterkeys():
if settings[key] is not None:
body[key] = settings[key]
# Update the properties of group
g1 = group.update(groupUniqueId=groupId, body=body).execute()
print '\nUpdated Access Permissions to the group\n'
pprint.pprint(g1)
def main(argv):
"""Demos the setting of the access properties by the Groups Settings API."""
usage = 'usage: %prog [options]'
parser = OptionParser(usage=usage)
parser.add_option('--groupId',
help='Group email address')
parser.add_option('--whoCanInvite',
help='Possible values: ALL_MANAGERS_CAN_INVITE, '
'ALL_MEMBERS_CAN_INVITE')
parser.add_option('--whoCanJoin',
help='Possible values: ALL_IN_DOMAIN_CAN_JOIN, '
'ANYONE_CAN_JOIN, CAN_REQUEST_TO_JOIN, '
'CAN_REQUEST_TO_JOIN')
parser.add_option('--whoCanPostMessage',
help='Possible values: ALL_IN_DOMAIN_CAN_POST, '
'ALL_MANAGERS_CAN_POST, ALL_MEMBERS_CAN_POST, '
'ANYONE_CAN_POST, NONE_CAN_POST')
parser.add_option('--whoCanViewGroup',
help='Possible values: ALL_IN_DOMAIN_CAN_VIEW, '
'ALL_MANAGERS_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, '
'ANYONE_CAN_VIEW')
parser.add_option('--whoCanViewMembership',
help='Possible values: ALL_IN_DOMAIN_CAN_VIEW, '
'ALL_MANAGERS_CAN_VIEW, ALL_MEMBERS_CAN_VIEW, '
'ANYONE_CAN_VIEW')
(options, args) = parser.parse_args()
if options.groupId is None:
print 'Give the groupId for the group'
parser.print_help()
return
settings = {}
if (options.whoCanInvite or options.whoCanJoin or options.whoCanPostMessage
or options.whoCanPostMessage or options.whoCanViewMembership) is None:
print 'No access parameters given in input to update access permissions'
parser.print_help()
else:
settings = {'whoCanInvite': options.whoCanInvite,
'whoCanJoin': options.whoCanJoin,
'whoCanPostMessage': options.whoCanPostMessage,
'whoCanViewGroup': options.whoCanViewGroup,
'whoCanViewMembership': options.whoCanViewMembership}
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/apps.groups.settings',
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage('groupsettings.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
print 'invalid credentials'
# Save the credentials in storage to be used in subsequent runs.
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('groupssettings', 'v1', http=http)
access_settings(service=service, groupId=options.groupId, settings=settings)
if __name__ == '__main__':
main(sys.argv)
| Python |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from apiclient.discovery import build
import httplib2
from oauth2client.appengine import OAuth2Decorator
import settings
decorator = OAuth2Decorator(client_id=settings.CLIENT_ID,
client_secret=settings.CLIENT_SECRET,
scope=settings.SCOPE,
user_agent='mytasks')
class MainHandler(webapp.RequestHandler):
  """Shows the user's default task list, or an authorization link."""

  @decorator.oauth_aware
  def get(self):
    if not decorator.has_credentials():
      # Not yet authorized: render the page with just an authorization link.
      self.response.out.write(template.render(
          'templates/index.html',
          {'tasks': [], 'authorize_url': decorator.authorize_url()}))
      return
    service = build('tasks', 'v1', http=decorator.http())
    result = service.tasks().list(tasklist='@default').execute()
    tasks = result.get('items', [])
    # Attach a shortened title for display purposes.
    for task in tasks:
      task['title_short'] = truncate(task['title'], 26)
    self.response.out.write(
        template.render('templates/index.html', {'tasks': tasks}))
def truncate(s, l):
  """Return s if it is at most l characters long, otherwise its first l
  characters followed by '...'."""
  if len(s) > l:
    return s[:l] + '...'
  return s
application = webapp.WSGIApplication([('/', MainHandler)], debug=True)
def main():
  """WSGI entry point: run the module-level webapp application."""
  run_wsgi_app(application)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line example for Moderator.
Command-line application that exercises the Google Moderator API.
Usage:
$ python moderator.py
You can also get help on all the command-line flags the program understands
by running:
$ python moderator.py --help
To get detailed log output run:
$ python moderator.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
# Parsed command-line flags; populated in main() by calling FLAGS(argv).
FLAGS = gflags.FLAGS

# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
# NOTE(review): a client secret is committed here; acceptable for a sample,
# never for production code.
FLOW = OAuth2WebServerFlow(
    client_id='433807057907.apps.googleusercontent.com',
    client_secret='jigtZpMApkRxncxikFpR+SFg',
    scope='https://www.googleapis.com/auth/moderator',
    user_agent='moderator-cmdline-sample/1.0')

# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('moderator.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("moderator", "v1", http=http)
try:
# Create a new Moderator series.
series_body = {
"description": "Share and rank tips for eating healthy and cheap!",
"name": "Eating Healthy & Cheap",
"videoSubmissionAllowed": False
}
series = service.series().insert(body=series_body).execute()
print "Created a new series"
# Create a new Moderator topic in that series.
topic_body = {
"description": "Share your ideas on eating healthy!",
"name": "Ideas",
"presenter": "liz"
}
topic = service.topics().insert(seriesId=series['id']['seriesId'],
body=topic_body).execute()
print "Created a new topic"
# Create a new Submission in that topic.
submission_body = {
"attachmentUrl": "http://www.youtube.com/watch?v=1a1wyc5Xxpg",
"attribution": {
"displayName": "Bashan",
"location": "Bainbridge Island, WA"
},
"text": "Charlie Ayers @ Google"
}
submission = service.submissions().insert(seriesId=topic['id']['seriesId'],
topicId=topic['id']['topicId'], body=submission_body).execute()
print "Inserted a new submisson on the topic"
# Vote on that newly added Submission.
vote_body = {
"vote": "PLUS"
}
service.votes().insert(seriesId=topic['id']['seriesId'],
submissionId=submission['id']['submissionId'],
body=vote_body)
print "Voted on the submission"
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Google Prediction API
Command-line application that trains on your input data. This sample does
the same thing as the Hello Prediction! example. You might want to run
the setup.sh script to load the sample data to Google Storage.
Usage:
$ python prediction.py --object_name="bucket/object" --id="model_id"
You can also get help on all the command-line flags the program understands
by running:
$ python prediction.py --help
To get detailed log output run:
$ python prediction.py --logging_level=DEBUG
"""
__author__ = ('jcgregorio@google.com (Joe Gregorio), '
'marccohen@google.com (Marc Cohen)')
import apiclient.errors
import gflags
import httplib2
import logging
import os
import pprint
import sys
import time
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
# Parsed command-line flags; populated in main() by calling FLAGS(argv).
FLAGS = gflags.FLAGS

# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'samples/prediction/client_secrets.json'

# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)

# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
    scope='https://www.googleapis.com/auth/prediction',
    message=MISSING_CLIENT_SECRETS_MESSAGE)

# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
# Both flags below are mandatory; gflags rejects the command line if they
# are absent.
gflags.DEFINE_string('object_name',
                     None,
                     'Full Google Storage path of csv data (ex bucket/object)')
gflags.MarkFlagAsRequired('object_name')
gflags.DEFINE_string('id',
                     None,
                     'Model Id of your choosing to name trained model')
gflags.MarkFlagAsRequired('id')

# Time to wait (in seconds) between successive checks of training status.
SLEEP_TIME = 10
def print_header(line):
'''Format and print header block sized to length of line'''
header_str = '='
header_line = header_str * len(line)
print '\n' + header_line
print line
print header_line
def main(argv):
# Let the gflags module process the command-line arguments.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('prediction.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
try:
# Get access to the Prediction API.
service = build("prediction", "v1.5", http=http)
papi = service.trainedmodels()
# List models.
print_header('Fetching list of first ten models')
result = papi.list(maxResults=10).execute()
print 'List results:'
pprint.pprint(result)
# Start training request on a data set.
print_header('Submitting model training request')
body = {'id': FLAGS.id, 'storageDataLocation': FLAGS.object_name}
start = papi.insert(body=body).execute()
print 'Training results:'
pprint.pprint(start)
# Wait for the training to complete.
print_header('Waiting for training to complete')
while True:
status = papi.get(id=FLAGS.id).execute()
state = status['trainingStatus']
print 'Training state: ' + state
if state == 'DONE':
break
elif state == 'RUNNING':
time.sleep(SLEEP_TIME)
continue
else:
raise Exception('Training Error: ' + state)
# Job has completed.
print 'Training completed:'
pprint.pprint(status)
break
# Describe model.
print_header('Fetching model description')
result = papi.analyze(id=FLAGS.id).execute()
print 'Analyze results:'
pprint.pprint(result)
# Make a prediction using the newly trained model.
print_header('Making a prediction')
body = {'input': {'csvInstance': ["mucho bueno"]}}
result = papi.predict(body=body, id=FLAGS.id).execute()
print 'Prediction results...'
pprint.pprint(result)
# Delete model.
print_header('Deleting model')
result = papi.delete(id=FLAGS.id).execute()
print 'Model deleted.'
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Basic query against the public shopping search API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request from the discovery service.
SHOPPING_API_VERSION = 'v1'
# API key identifying this application to Google.
# NOTE(review): a developer key is committed here; acceptable for a sample
# only.
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Fetch and pretty-print one page of public products for the US.

  The source and country arguments are required by the list method.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  products = client.products()
  response = products.list(source='public', country='US').execute()
  pprint.pprint(response)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
'''Simple command-line example for The Google Search
API for Shopping.
Command-line application that does a search for products.
'''
__author__ = 'aherrman@google.com (Andy Herrman)'
from apiclient.discovery import build
# Uncomment the next line to get very detailed logging
# httplib2.debuglevel = 4
def main():
p = build('shopping', 'v1',
developerKey='AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0')
# Search over all public offers:
print 'Searching all public offers.'
res = p.products().list(
country='US',
source='public',
q='android t-shirt'
).execute()
print_items(res['items'])
# Search over a specific merchant's offers:
print
print 'Searching Google Store.'
res = p.products().list(
country='US',
source='public',
q='android t-shirt',
restrictBy='accountId:5968952',
).execute()
print_items(res['items'])
# Remember the Google Id of the last product
googleId = res['items'][0]['product']['googleId']
# Get data for the single public offer:
print
print 'Getting data for offer %s' % googleId
res = p.products().get(
source='public',
accountId='5968952',
productIdType='gid',
productId=googleId
).execute()
print_item(res)
def print_item(item):
"""Displays a single item: title, merchant, link."""
product = item['product']
print '- %s [%s] (%s)' % (product['title'],
product['author']['name'],
product['link'])
def print_items(items):
  """Print every offer in items, one per line."""
  for entry in items:
    print_item(entry)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Queries with paginated results against the shopping search API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request from the discovery service.
SHOPPING_API_VERSION = 'v1'
# API key identifying this application to Google.
# NOTE(review): a developer key is committed here; acceptable for a sample
# only.
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Page through the entire public US product feed, prompting between pages.

  Pagination is controlled with the "startIndex" parameter passed to the
  list method of the resource.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  resource = client.products()

  # The first request contains the information we need for the total items
  # and page size, as well as returning the first page of results.
  response = resource.list(source='public', country='US',
                           q=u'digital camera').execute()
  page_size = response['itemsPerPage']
  total = response['totalItems']

  for start in range(1, total, page_size):
    answer = raw_input('About to display results from %s to %s, y/(n)? ' %
                       (start, start + page_size))
    if answer.strip().lower().startswith('n'):
      # Stop if the user has had enough
      break
    # Fetch this series of results
    response = resource.list(source='public', country='US',
                             q=u'digital camera', startIndex=start).execute()
    pprint.pprint(response)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
from apiclient.discovery import build
# Version of the Shopping Search API to request from the discovery service.
SHOPPING_API_VERSION = 'v1'
# API key identifying this application to Google.
# NOTE(review): a developer key is committed here; acceptable for a sample
# only.
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
"""Get and print a histogram of the top 15 brand distribution for a search
query.
Histograms are created by using the "Facets" functionality of the API. A
Facet is a view of a certain property of products, containing a number of
buckets, one for each value of that property. Or concretely, for a parameter
such as "brand" of a product, the facets would include a facet for brand,
which would contain a number of buckets, one for each brand returned in the
result.
A bucket contains either a value and a count, or a value and a range. In the
simple case of a value and a count for our example of the "brand" property,
the value would be the brand name, eg "sony" and the count would be the
number of results in the search.
"""
client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
resource = client.products()
request = resource.list(source='public', country='US', q=u'digital camera',
facets_include='brand:15', facets_enabled=True)
response = request.execute()
# Pick the first and only facet for this query
facet = response['facets'][0]
print '\n\tHistogram for "%s":\n' % facet['property']
labels = []
values = []
for bucket in facet['buckets']:
labels.append(bucket['value'].rjust(20))
values.append(bucket['count'])
weighting = 50.0 / max(values)
for label, value in zip(labels, values):
print label, '#' * int(weighting * value), '(%s)' % value
print
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with ranked results against the shopping search API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request from the discovery service.
SHOPPING_API_VERSION = 'v1'
# API key identifying this application to Google.
# NOTE(review): a developer key is committed here; acceptable for a sample
# only.
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print a feed of public US products matching 'digital camera', ranked
  by ascending price.

  The list method accepts a "rankBy" parameter with five supported values:
    "relevancy"
    "modificationTime:ascending"
    "modificationTime:descending"
    "price:ascending"
    "price:descending"
  These parameters can be combined. The default ranking is "relevancy"
  when rankBy is omitted.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  # The rankBy parameter to the list method causes results to be ranked, in
  # this case by ascending price.
  response = client.products().list(source='public', country='US',
                                    q=u'digital camera',
                                    rankBy='price:ascending').execute()
  pprint.pprint(response)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query that is restricted by a parameter against the public shopping search
API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request from the discovery service.
SHOPPING_API_VERSION = 'v1'
# API key identifying this application to Google.
# NOTE(review): a developer key is committed here; acceptable for a sample
# only.
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print all public US products matching "Digital Camera" made by Canon.

  The "restrictBy" parameter controls which types of results are returned.
  Multiple values for a single restrictBy can be separated by the "|"
  operator, e.g. restrictBy = 'brand:canon|sony|apple' for products created
  by Canon, Sony, or Apple. Multiple restricting parameters are separated
  by a comma, e.g. restrictBy = 'brand:sony,title:32GB'.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  response = client.products().list(source='public', country='US',
                                    restrictBy='brand:canon',
                                    q='Digital Camera').execute()
  pprint.pprint(response)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Query with grouping against the shopping search API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request from the discovery service.
SHOPPING_API_VERSION = 'v1'
# API key identifying this application to Google.
# NOTE(review): a developer key is committed here; acceptable for a sample
# only.
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print public US products matching 'digital camera', grouped (crowded)
  by the 8 top brands.

  The list method accepts a "crowdBy" parameter of the form
  <attribute>:<occurrence>, where <occurrence> is how many of that
  <attribute> to use; e.g. "brand:5" crowds by the 5 top brands. Supported
  rules are currently:
    account_id:<occurrence>  (eg account_id:5)
    brand:<occurrence>       (eg brand:5)
    condition:<occurrence>   (eg condition:3)
    gtin:<occurrence>        (eg gtin:10)
    price:<occurrence>       (eg price:10)
  Multiple rules are comma-separated, e.g. crowdBy="brand:5,condition:3".
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  # The crowdBy parameter to the list method causes the results to be
  # grouped, in this case by the top 8 brands.
  response = client.products().list(source='public', country='US',
                                    q=u'digital camera',
                                    crowdBy='brand:8').execute()
  pprint.pprint(response)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Full text search query against the shopping search API"""
import pprint
from apiclient.discovery import build
# Version of the Shopping Search API to request from the discovery service.
SHOPPING_API_VERSION = 'v1'
# API key identifying this application to Google.
# NOTE(review): a developer key is committed here; acceptable for a sample
# only.
DEVELOPER_KEY = 'AIzaSyACZJW4JwcWwz5taR2gjIMNQrtgDLfILPc'
def main():
  """Print all public products matching the full-text query 'digital camera'.

  The q parameter carries the search query. The "|" operator searches
  alternatives, e.g. q = 'banana|apple' matches bananas or apples. Phrases
  containing spaces are double-quoted, e.g. q='"mp3 player"', and the two
  can be combined: q = '"mp3 player"|ipod'.
  """
  client = build('shopping', SHOPPING_API_VERSION, developerKey=DEVELOPER_KEY)
  # Note the 'q' parameter, which will contain the value of the search query
  response = client.products().list(source='public', country='US',
                                    q=u'digital camera').execute()
  pprint.pprint(response)


if __name__ == '__main__':
  main()
| Python |
# Django settings for django_sample project.
# NOTE(review): DATABASE_* and execute-era settings follow the old-style
# (pre-Django 1.2) layout this sample targets.
import os

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)

MANAGERS = ADMINS

# SQLite keeps the sample self-contained; no database server is required.
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = 'database.sqlite3'
DATABASE_USER = ''
DATABASE_PASSWORD = ''
DATABASE_HOST = ''
DATABASE_PORT = ''

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/New_York'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): a secret key is committed here; acceptable for a sample only.
SECRET_KEY = '_=9hq-$t_uv1ckf&s!y2$9g$1dm*6p1cl%*!^mg=7gr)!zj32d'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.load_template_source',
    'django.template.loaders.app_directories.load_template_source',
    # 'django.template.loaders.eggs.load_template_source',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
)

ROOT_URLCONF = 'django_sample.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates"
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # BUG FIX: trailing comma added — without it the parentheses do not make
    # a tuple, so TEMPLATE_DIRS was a plain string that Django would iterate
    # character by character.
    os.path.join(os.path.dirname(__file__), 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django_sample.plus'
)
| Python |
import os
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# URL routing for the Google+ sample: the index view starts the OAuth 2.0
# dance and /oauth2callback completes it (see plus/views.py).
urlpatterns = patterns('',
    # Example:
    (r'^$', 'django_sample.plus.views.index'),
    (r'^oauth2callback', 'django_sample.plus.views.auth_return'),
    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    (r'^admin/', include(admin.site.urls)),
    (r'^accounts/login/$', 'django.contrib.auth.views.login',
     {'template_name': 'plus/login.html'}),
    # Serve files under static/ next to this module during development.
    (r'^static/(?P<path>.*)$', 'django.views.static.serve',
     {'document_root': os.path.join(os.path.dirname(__file__), 'static')
     }),
)
| Python |
import pickle
import base64
from django.contrib import admin
from django.contrib.auth.models import User
from django.db import models
from oauth2client.django_orm import FlowField
from oauth2client.django_orm import CredentialsField
# The Flow could also be stored in memcache since it is short lived.
class FlowModel(models.Model):
  # One pending OAuth flow per user: the User is the primary key.
  id = models.ForeignKey(User, primary_key=True)
  # Pickled oauth2client Flow object awaiting step 2 of the dance.
  flow = FlowField()
class CredentialsModel(models.Model):
  # One stored credential per user: the User is the primary key.
  id = models.ForeignKey(User, primary_key=True)
  # Serialized oauth2client Credentials for that user.
  credential = CredentialsField()
# Default admin views so both models can be inspected in the Django admin.
class CredentialsAdmin(admin.ModelAdmin):
  pass


class FlowAdmin(admin.ModelAdmin):
  pass


admin.site.register(CredentialsModel, CredentialsAdmin)
admin.site.register(FlowModel, FlowAdmin)
| Python |
"""
This file demonstrates two different styles of tests (one doctest and one
unittest). These will both pass when you run "manage.py test".
Replace these with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        # assertEqual replaces failUnlessEqual, which has been a deprecated
        # alias since Python 2.7 / unittest2.
        self.assertEqual(1 + 1, 2)
# Doctest suite picked up by "manage.py test" alongside the unittest above.
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
>>> 1 + 1 == 2
True
"""}
| Python |
import os
import logging
import httplib2
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from oauth2client.django_orm import Storage
from oauth2client.client import OAuth2WebServerFlow
from django_sample.plus.models import CredentialsModel
from django_sample.plus.models import FlowModel
from apiclient.discovery import build
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
# Redirect URI for step 2 of the OAuth dance; must match the URL routed to
# auth_return in urls.py.
STEP2_URI = 'http://localhost:8000/oauth2callback'


@login_required
def index(request):
  """Show the user's public Google+ activities, or start the OAuth flow.

  If no valid credential is stored for this user, build an OAuth flow,
  persist it keyed on the user, and redirect the browser to Google's
  authorization page; auth_return completes the dance. Otherwise use the
  stored credential to fetch and render the activity list.
  """
  storage = Storage(CredentialsModel, 'id', request.user, 'credential')
  credential = storage.get()
  # Idiom fix: compare truthiness rather than '== True'.
  if credential is None or credential.invalid:
    flow = OAuth2WebServerFlow(
        client_id='[[Insert Client ID here.]]',
        client_secret='[[Insert Client Secret here.]]',
        scope='https://www.googleapis.com/auth/plus.me',
        user_agent='plus-django-sample/1.0',
        )
    authorize_url = flow.step1_get_authorize_url(STEP2_URI)
    # Save the flow so auth_return can finish the exchange for this user.
    f = FlowModel(id=request.user, flow=flow)
    f.save()
    return HttpResponseRedirect(authorize_url)
  else:
    http = httplib2.Http()
    http = credential.authorize(http)
    service = build("plus", "v1", http=http)
    activities = service.activities()
    activitylist = activities.list(collection='public',
                                   userId='me').execute()
    logging.info(activitylist)
    return render_to_response('plus/welcome.html', {
        'activitylist': activitylist,
        })
@login_required
def auth_return(request):
  """OAuth 2.0 callback: exchange the auth code and store the credential.

  Looks up the Flow that index() saved for this user, completes step 2 of
  the dance with the callback's request parameters, stores the resulting
  credential, deletes the consumed flow, and returns to the index.
  """
  try:
    f = FlowModel.objects.get(id=request.user)
    credential = f.flow.step2_exchange(request.REQUEST)
    storage = Storage(CredentialsModel, 'id', request.user, 'credential')
    storage.put(credential)
    f.delete()
    return HttpResponseRedirect("/")
  except FlowModel.DoesNotExist:
    # BUG FIX: this branch previously fell through and returned None, which
    # makes Django raise "The view ... didn't return an HttpResponse".
    # Send the user back to the index to restart the flow instead.
    return HttpResponseRedirect("/")
| Python |
#!/usr/bin/python
# Standard Django project management script.
# NOTE(review): execute_manager is the old-style entry point used by early
# Django versions — confirm against the Django version this sample targets.
from django.core.management import execute_manager
try:
  import settings # Assumed to be in the same directory.
except ImportError:
  import sys
  sys.stderr.write("""Error: Can't find the file 'settings.py' in the
directory containing %r. It appears you've customized things. You'll
have to run django-admin.py, passing it your settings module.
(If the file settings.py does indeed exist, it's causing an ImportError
somehow.)\n""" % __file__)
  sys.exit(1)

if __name__ == "__main__":
  execute_manager(settings)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
"""Simple command-line example for Latitude.
Command-line application that sets the users
current location.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import httplib2
import pickle
from apiclient.discovery import build
from apiclient.oauth import FlowThreeLegged
from apiclient.ext.authtools import run
from apiclient.ext.file import Storage
# Uncomment to get detailed logging
# httplib2.debuglevel = 4
def main():
storage = Storage('latitude.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
auth_discovery = build("latitude", "v1").auth_discovery()
flow = FlowThreeLegged(auth_discovery,
# You MUST have a consumer key and secret tied to a
# registered domain to use the latitude API.
#
# https://www.google.com/accounts/ManageDomains
consumer_key='REGISTERED DOMAIN NAME',
consumer_secret='KEY GIVEN DURING REGISTRATION',
user_agent='google-api-client-python-latitude/1.0',
domain='REGISTERED DOMAIN NAME',
scope='https://www.googleapis.com/auth/latitude',
xoauth_displayname='Google API Latitude Example',
location='current',
granularity='city'
)
credentials = run(flow, storage)
http = httplib2.Http()
http = credentials.authorize(http)
service = build("latitude", "v1", http=http)
body = {
"data": {
"kind": "latitude#location",
"latitude": 37.420352,
"longitude": -122.083389,
"accuracy": 130,
"altitude": 35
}
}
print service.currentLocation().insert(body=body).execute()
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Google Inc. All Rights Reserved.
# Portions copyright PSF License
# http://code.activestate.com/recipes/278731-creating-a-daemon-the-python-way/
"""A pm-action hook for setting timezone.
Uses the Google Latitude API and the geonames.org
API to find your cellphones latitude and longitude
and from the determine the timezone you are in,
and then sets the computer's timezone to that.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
import httplib2
import os
import pickle
import pprint
import subprocess
import sys
import time
import uritemplate
from apiclient.anyjson import simplejson
from apiclient.discovery import build
from apiclient.oauth import FlowThreeLegged
from apiclient.ext.authtools import run
from apiclient.ext.file import Storage
# Uncomment to get detailed logging
# httplib2.debuglevel = 4
# URI Template to convert latitude and longitude into a timezone
# URI Template to convert latitude and longitude into a timezone
GEONAMES = 'http://api.geonames.org/timezoneJSON?lat={lat}&lng={long}&username=jcgregorio'

# Lock file holding the pid of a pending resume daemon so a later
# suspend can kill it.
PID_FILE = '/var/lock/tznever.pid'
# httplib2 on-disk cache directory.
CACHE = '/var/local/tznever/.cache'

# Default daemon parameters.
# File mode creation mask of the daemon.
UMASK = 0

# Default working directory for the daemon.
WORKDIR = "/"

# Default maximum for the number of available file descriptors.
MAXFD = 1024

# The standard I/O file descriptors are redirected to /dev/null by default.
if (hasattr(os, "devnull")):
  REDIRECT_TO = os.devnull
else:
  REDIRECT_TO = "/dev/null"
def main():
storage = Storage('/var/local/tznever/latitude_credentials.dat')
credentials = storage.get()
if len(sys.argv) == 1:
if credentials is None or credentials.invalid == True:
auth_discovery = build('latitude', 'v1').auth_discovery()
flow = FlowThreeLegged(auth_discovery,
consumer_key='m-buzz.appspot.com',
consumer_secret='NQEHb4eU6GkjjFGe1MD5W6IC',
user_agent='tz-never/1.0',
domain='m-buzz.appspot.com',
scope='https://www.googleapis.com/auth/latitude',
xoauth_displayname='TZ Never Again',
location='current',
granularity='city'
)
credentials = run(flow, storage)
else:
print "You are already authorized"
else:
if credentials is None or credentials.invalid == True:
print "This app, tznever, is not authorized. Run from the command-line to re-authorize."
os.exit(1)
if len(sys.argv) > 1 and sys.argv[1] in ['hibernate', 'suspend']:
print "Hibernating"
# Kill off the possibly still running process by its pid
if os.path.isfile(PID_FILE):
f = file(PID_FILE, 'r')
pid = f.read()
f.close()
cmdline = ['/bin/kill', '-2', pid]
subprocess.Popen(cmdline)
os.unlink(PID_FILE)
elif len(sys.argv) > 1 and sys.argv[1] in ['thaw', 'resume']:
print "Resuming"
# write our pid out
f = file(PID_FILE, 'w')
f.write(str(os.getpid()))
f.close()
success = False
first_time = True
while not success:
try:
if not first_time:
time.sleep(5)
else:
first_time = False
print "Daemonizing so as not to gum up the works."
createDaemon()
# rewrite the PID file with our new PID
f = file(PID_FILE, 'w')
f.write(str(os.getpid()))
f.close()
http = httplib2.Http(CACHE)
http = credentials.authorize(http)
service = build('latitude', 'v1', http=http)
location = service.currentLocation().get(granularity='city').execute()
position = {
'lat': str(location['latitude']),
'long': str(location['longitude'])
}
http2 = httplib2.Http(CACHE)
resp, content = http2.request(uritemplate.expand(GEONAMES, position))
geodata = simplejson.loads(content)
tz = geodata['timezoneId']
f = file('/etc/timezone', 'w')
f.write(tz)
f.close()
cmdline = 'dpkg-reconfigure -f noninteractive tzdata'.split(' ')
subprocess.Popen(cmdline)
success = True
except httplib2.ServerNotFoundError, e:
print "still not connected, sleeping"
except KeyboardInterrupt, e:
if os.path.isfile(PID_FILE):
os.unlink(PID_FILE)
success = True
# clean up pid file
if os.path.isfile(PID_FILE):
os.unlink(PID_FILE)
def createDaemon():
  """Detach a process from the controlling terminal and run it in the
  background as a daemon.

  Classic UNIX double-fork recipe: the first fork returns control to the
  shell, os.setsid() makes the surviving child a session leader with no
  controlling terminal, and a second fork guarantees the daemon can never
  re-acquire one.  Both intermediate parents call os._exit(0); only the
  final daemon process returns from this function.

  Relies on module-level constants WORKDIR, UMASK, MAXFD and REDIRECT_TO.

  Returns:
    0, in the daemon process only.
  """
  try:
    # Fork a child process so the parent can exit. This returns control to
    # the command-line or shell. It also guarantees that the child will not
    # be a process group leader, since the child receives a new process ID
    # and inherits the parent's process group ID. This step is required
    # to insure that the next call to os.setsid is successful.
    pid = os.fork()
  except OSError, e:
    raise Exception, "%s [%d]" % (e.strerror, e.errno)
  if (pid == 0):  # The first child.
    # To become the session leader of this new session and the process group
    # leader of the new process group, we call os.setsid(). The process is
    # also guaranteed not to have a controlling terminal.
    os.setsid()
    # Is ignoring SIGHUP necessary?
    #
    # It's often suggested that the SIGHUP signal should be ignored before
    # the second fork to avoid premature termination of the process. The
    # reason is that when the first child terminates, all processes, e.g.
    # the second child, in the orphaned group will be sent a SIGHUP.
    #
    # "However, as part of the session management system, there are exactly
    # two cases where SIGHUP is sent on the death of a process:
    #
    # 1) When the process that dies is the session leader of a session that
    # is attached to a terminal device, SIGHUP is sent to all processes
    # in the foreground process group of that terminal device.
    # 2) When the death of a process causes a process group to become
    # orphaned, and one or more processes in the orphaned group are
    # stopped, then SIGHUP and SIGCONT are sent to all members of the
    # orphaned group." [2]
    #
    # The first case can be ignored since the child is guaranteed not to have
    # a controlling terminal. The second case isn't so easy to dismiss.
    # The process group is orphaned when the first child terminates and
    # POSIX.1 requires that every STOPPED process in an orphaned process
    # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
    # second child is not STOPPED though, we can safely forego ignoring the
    # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
    #
    # import signal # Set handlers for asynchronous events.
    # signal.signal(signal.SIGHUP, signal.SIG_IGN)
    try:
      # Fork a second child and exit immediately to prevent zombies. This
      # causes the second child process to be orphaned, making the init
      # process responsible for its cleanup. And, since the first child is
      # a session leader without a controlling terminal, it's possible for
      # it to acquire one by opening a terminal in the future (System V-
      # based systems). This second fork guarantees that the child is no
      # longer a session leader, preventing the daemon from ever acquiring
      # a controlling terminal.
      pid = os.fork()  # Fork a second child.
    except OSError, e:
      raise Exception, "%s [%d]" % (e.strerror, e.errno)
    if (pid == 0):  # The second child.
      # Since the current working directory may be a mounted filesystem, we
      # avoid the issue of not being able to unmount the filesystem at
      # shutdown time by changing it to the root directory.
      os.chdir(WORKDIR)
      # We probably don't want the file mode creation mask inherited from
      # the parent, so we give the child complete control over permissions.
      os.umask(UMASK)
    else:
      # exit() or _exit()? See below.
      os._exit(0)  # Exit parent (the first child) of the second child.
  else:
    # exit() or _exit()?
    # _exit is like exit(), but it doesn't call any functions registered
    # with atexit (and on_exit) or any registered signal handlers. It also
    # closes any open file descriptors. Using exit() may cause all stdio
    # streams to be flushed twice and any temporary files may be unexpectedly
    # removed. It's therefore recommended that child branches of a fork()
    # and the parent branch(es) of a daemon use _exit().
    os._exit(0)  # Exit parent of the first child.
  # Close all open file descriptors. This prevents the child from keeping
  # open any file descriptors inherited from the parent. There is a variety
  # of methods to accomplish this task. Three are listed below.
  #
  # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
  # number of open file descriptors to close. If it doesn't exists, use
  # the default value (configurable).
  #
  # try:
  # maxfd = os.sysconf("SC_OPEN_MAX")
  # except (AttributeError, ValueError):
  # maxfd = MAXFD
  #
  # OR
  #
  # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
  # maxfd = os.sysconf("SC_OPEN_MAX")
  # else:
  # maxfd = MAXFD
  #
  # OR
  #
  # Use the getrlimit method to retrieve the maximum file descriptor number
  # that can be opened by this process. If there is not limit on the
  # resource, use the default value.
  #
  import resource  # Resource usage information.
  maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
  if (maxfd == resource.RLIM_INFINITY):
    maxfd = MAXFD
  # Iterate through and close all file descriptors.
  # NOTE(review): the hard RLIMIT_NOFILE can be very large on some systems,
  # making this loop slow -- confirm acceptable for this tool.
  for fd in range(0, maxfd):
    try:
      os.close(fd)
    except OSError:  # ERROR, fd wasn't open to begin with (ignored)
      pass
  # Redirect the standard I/O file descriptors to the specified file. Since
  # the daemon has no controlling terminal, most daemons redirect stdin,
  # stdout, and stderr to /dev/null. This is done to prevent side-effects
  # from reads and writes to the standard I/O file descriptors.
  # This call to open is guaranteed to return the lowest file descriptor,
  # which will be 0 (stdin), since it was closed above.
  os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)
  # Duplicate standard input to standard output and standard error.
  os.dup2(0, 1)  # standard output (1)
  os.dup2(0, 2)  # standard error (2)
  return(0)
# Entry point when invoked directly from the command line.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This application produces formatted listings for Google Cloud
Storage buckets.
It takes a bucket name in the URL path and does an HTTP GET on the
corresponding Google Cloud Storage URL to obtain a listing of the
bucket contents. For example, if this app is invoked with the URI
http://bucket-list.appspot.com/foo, it would remove the bucket name
'foo', append it to the Google Cloud Storage service URI and send
a GET request to the resulting URI. The bucket listing is returned
in an XML document, which is prepended with a reference to an XSLT
style sheet for human readable presentation.
More information about using Google App Engine apps and service accounts
to call Google APIs can be found here:
<https://developers.google.com/accounts/docs/OAuth2ServiceAccount>
<http://code.google.com/appengine/docs/python/appidentity/overview.html>
"""
__author__ = 'marccohen@google.com (Marc Cohen)'
import httplib2
import logging
import os
import pickle
import re
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from oauth2client.appengine import AppAssertionCredentials
# Constants for the XSL stylesheet and the Google Cloud Storage URI.
# XSL is the processing instruction injected into each bucket listing so
# browsers render the XML with /listing.xsl; URI is the storage endpoint.
XSL = '\n<?xml-stylesheet href="/listing.xsl" type="text/xsl"?>\n';
URI = 'http://commondatastorage.googleapis.com'
# Obtain service account credentials and authorize HTTP connection.
# The authorized Http object is module-level and shared by all requests;
# App Engine memcache serves as httplib2's response cache.
credentials = AppAssertionCredentials(
    scope='https://www.googleapis.com/auth/devstorage.read_write')
http = credentials.authorize(httplib2.Http(memcache))
class MainHandler(webapp.RequestHandler):
  """Serves a styled XML listing of the bucket named in the URL path."""

  def get(self):
    try:
      # The request path (minus any trailing slash) names the bucket.
      bucket = self.request.path
      if bucket[-1] == '/':
        bucket = bucket[:-1]
      # Ask Google Cloud Storage for the bucket's listing.
      resp, content = http.request(URI + bucket, "GET")
      if resp.status != 200:
        # Non-200 responses are surfaced to the client via the handler
        # below; include status, bucket and body for diagnosis.
        raise Exception('Error: %s, bucket: %s, response: %s'
                        % (resp.status, bucket, content))
      # Prepend our XSL style-sheet reference to the listing document.
      content = re.sub('(<ListBucketResult)', XSL + '\\1', content)
      self.response.headers['Content-Type'] = 'text/xml'
      self.response.out.write(content)
    except Exception as e:
      # Any failure is reported as a plain-text 404.
      self.response.headers['Content-Type'] = 'text/plain'
      self.response.set_status(404)
      self.response.out.write(str(e))
def main():
  """Route every request path to MainHandler and start the WSGI app."""
  routes = [('.*', MainHandler)]
  run_wsgi_app(webapp.WSGIApplication(routes, debug=True))


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Starting template for Google App Engine applications.
Use this project as a starting point if you are just beginning to build a
Google App Engine project which will access and manage data held under a role
account for the App Engine app. More information about using Google App Engine
apps to call Google APIs can be found in Scenario 1 of the following document:
<https://sites.google.com/site/oauthgoog/Home/google-oauth2-assertion-flow>
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from apiclient.discovery import build
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from oauth2client.appengine import AppAssertionCredentials
# Service-account credentials for the app's role account; the authorized
# Http object (with App Engine memcache as httplib2's cache) is shared
# module-wide by all handlers.
credentials = AppAssertionCredentials(
    scope='https://www.googleapis.com/auth/urlshortener')
http = credentials.authorize(httplib2.Http(memcache))
# Discovery-built client for the URL Shortener API, v1.
service = build("urlshortener", "v1", http=http)
class MainHandler(webapp.RequestHandler):
  """Lists the role account's shortened URLs and accepts new ones."""

  def get(self):
    """Render welcome.html with (short, long) URL pairs for the account."""
    path = os.path.join(os.path.dirname(__file__), 'welcome.html')
    shortened = service.url().list().execute()
    short_and_long = []
    if 'items' in shortened:
      short_and_long = [(item["id"], item["longUrl"]) for item in
                        shortened["items"]]
    variables = {
        'short_and_long': short_and_long,
        }
    self.response.out.write(template.render(path, variables))

  def post(self):
    """Shorten the posted longUrl, then redirect back to the listing."""
    long_url = self.request.get("longUrl")
    # NOTE(review): presumably forces a fresh service-account token before
    # the write -- confirm before removing.
    credentials.refresh(http)
    # Fix: the insert result was bound to an unused local; the redirect to
    # get() re-lists everything, so the response is not needed here.
    service.url().insert(body={"longUrl": long_url}).execute()
    self.redirect("/")
def main():
  """Start the WSGI application with MainHandler mounted at '/'."""
  app = webapp.WSGIApplication([('/', MainHandler)], debug=True)
  run_wsgi_app(app)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for the Google+ API.
Command-line application that retrieves the users latest content and
then adds a new entry.
Usage:
$ python plus.py
You can also get help on all the command-line flags the program understands
by running:
$ python plus.py --help
To get detailed log output run:
$ python plus.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import os
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/plus.me',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('plus.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("plus", "v1", http=http)
try:
person = service.people().get(userId='me').execute(http)
print "Got your ID: %s" % person['displayName']
print
print "%-040s -> %s" % ("[Activitity ID]", "[Content]")
# Don't execute the request until we reach the paging loop below
request = service.activities().list(
userId=person['id'], collection='public')
# Loop over every activity and print the ID and a short snippet of content.
while ( request != None ):
activities_doc = request.execute()
for item in activities_doc.get('items', []):
print '%-040s -> %s' % (item['id'], item['object']['content'][:30])
request = service.activities().list_next(request, activities_doc)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command-line sample for the Google URL Shortener API.
Simple command-line example for Google URL Shortener API that shortens
a URI then expands it.
Usage:
$ python urlshortener.py
You can also get help on all the command-line flags the program understands
by running:
$ python urlshortener.py --help
To get detailed log output run:
$ python urlshortener.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
FLOW = OAuth2WebServerFlow(
client_id='[[CLIENT ID GOES HERE]]',
client_secret='[[CLIENT SECRET GOES HERE]]',
scope='https://www.googleapis.com/auth/urlshortener',
user_agent='urlshortener-cmdline-sample/1.0')
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('urlshortener.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build("urlshortener", "v1", http=http)
try:
url = service.url()
# Create a shortened URL by inserting the URL into the url collection.
body = {"longUrl": "http://code.google.com/apis/urlshortener/" }
resp = url.insert(body=body).execute()
pprint.pprint(resp)
short_url = resp['id']
# Convert the shortened URL back into a long URL
resp = url.get(shortUrl=short_url).execute()
pprint.pprint(resp)
except AccessTokenRefreshError:
print ("The credentials have been revoked or expired, please re-run"
"the application to re-authorize")
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Sample application for Python documentation of APIs.
This is running live at http://api-python-client-doc.appspot.com where it
provides a list of APIs and PyDoc documentation for all the generated API
surfaces as they appear in the google-api-python-client. In addition it also
provides a Google Gadget.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import inspect
import os
import pydoc
import re
from apiclient import discovery
from apiclient.errors import HttpError
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from oauth2client.anyjson import simplejson
DISCOVERY_URI = 'https://www.googleapis.com/discovery/v1/apis?preferred=true'
def get_directory_doc():
  """Fetch the directory of preferred APIs from the Discovery service.

  Returns:
    list of dict, one entry per API in the discovery directory.
  """
  http = httplib2.Http(memcache)
  uri = DISCOVERY_URI
  # Attribute the request to the end user's IP when one is available.
  ip = os.environ.get('REMOTE_ADDR', None)
  if ip:
    uri += ('&userIp=' + ip)
  resp, content = http.request(uri)
  return simplejson.loads(content)['items']
class MainHandler(webapp.RequestHandler):
  """Serves the main landing page listing all discovered APIs."""

  def get(self):
    directory = get_directory_doc()
    # Fall back to the description when an API entry has no title.
    for entry in directory:
      entry['title'] = entry.get('title', entry.get('description', ''))
    template_path = os.path.join(os.path.dirname(__file__), 'index.html')
    html = template.render(template_path, {'directory': directory,
                                           })
    self.response.out.write(html)
class GadgetHandler(webapp.RequestHandler):
  """Handles serving the Google Gadget."""

  def get(self):
    directory = get_directory_doc()
    # Fall back to the description when an API entry has no title.
    for item in directory:
      item['title'] = item.get('title', item.get('description', ''))
    path = os.path.join(os.path.dirname(__file__), 'gadget.html')
    # Fix: set the Content-Type header before writing the body; the old
    # order only worked because webapp buffers the response output.
    self.response.headers.add_header('Content-Type', 'application/xml')
    self.response.out.write(
        template.render(
            path, {'directory': directory,
                  }))
class EmbedHandler(webapp.RequestHandler):
  """Handles serving a front page suitable for embedding."""

  def get(self):
    directory = get_directory_doc()
    # Default each API's title to its description when absent.
    for api in directory:
      api['title'] = api.get('title', api.get('description', ''))
    page_path = os.path.join(os.path.dirname(__file__), 'embed.html')
    rendered = template.render(page_path, {'directory': directory,
                                           })
    self.response.out.write(rendered)
def _render(resource):
  """Build an HTML PyDoc page describing *resource*'s class.

  Args:
    resource: object, instance whose type is to be documented.
  Returns:
    str, a complete HTML document produced by pydoc.
  """
  obj, name = pydoc.resolve(type(resource))
  title = pydoc.describe(obj)
  body = pydoc.html.document(obj, name)
  return pydoc.html.page(title, body)
class ResourceHandler(webapp.RequestHandler):
  """Handles serving the PyDoc for a given collection.
  """

  def get(self, service_name, version, collection):
    """Render PyDoc HTML for an API service or one of its collections.

    Args:
      service_name: string, API name, e.g. 'urlshortener'.
      version: string, API version, e.g. 'v1'.
      collection: string or None, slash-separated path of nested
        collections within the service, e.g. 'url' or 'a/b'.
    """
    http = httplib2.Http(memcache)
    try:
      resource = discovery.build(service_name, version, http=http)
    except:
      # Unknown API/version (or discovery failure): report 404.
      return self.error(404)
    # descend the object path
    if collection:
      try:
        path = collection.split('/')
        if path:
          for method in path:
            resource = getattr(resource, method)()
      except:
        # Bad collection path: report 404.
        return self.error(404)
    page = _render(resource)
    # Collect sub-collections (callables flagged with __is_resource__) so
    # their names can be hyperlinked in the generated page.
    # NOTE(review): 'not "_" in name' skips any name containing an
    # underscore anywhere, not just leading-underscore privates --
    # confirm this is intended.
    collections = []
    for name in dir(resource):
      if not "_" in name and callable(getattr(resource, name)) and hasattr(
          getattr(resource, name), '__is_resource__'):
        collections.append(name)
    if collection is None:
      collection_path = ''
    else:
      collection_path = collection + '/'
    # Rewrite each collection name in the PyDoc output into a link that
    # descends one level deeper.
    for name in collections:
      page = re.sub('strong>(%s)<' % name,
          r'strong><a href="/%s/%s/%s">\1</a><' % (
          service_name, version, collection_path + name), page)
    # TODO(jcgregorio) breadcrumbs
    # TODO(jcgregorio) sample code?
    # Insert a Home link at the first paragraph break.
    page = re.sub('<p>', r'<a href="/">Home</a><p>', page, 1)
    self.response.out.write(page)
def main():
  """Wire up URL routes and start the WSGI application."""
  routes = [
      (r'/', MainHandler),
      (r'/_gadget/', GadgetHandler),
      (r'/_embed/', EmbedHandler),
      (r'/([^\/]*)/([^\/]*)(?:/(.*))?', ResourceHandler),
  ]
  util.run_wsgi_app(webapp.WSGIApplication(routes, debug=False))


if __name__ == '__main__':
  main()
| Python |
# version: v1
# scope: https://www.googleapis.com/auth/moderator
# title: Simple command-line example for Moderator.
# description: Command-line application that exercises the Google Moderator API.
# Create a new Moderator series.
series_body = {
"description": "Share and rank tips for eating healthy and cheap!",
"name": "Eating Healthy & Cheap",
"videoSubmissionAllowed": False
}
series = service.series().insert(body=series_body).execute()
print "Created a new series"
# Create a new Moderator topic in that series.
topic_body = {
"description": "Share your ideas on eating healthy!",
"name": "Ideas",
"presenter": "liz"
}
topic = service.topics().insert(seriesId=series['id']['seriesId'],
body=topic_body).execute()
print "Created a new topic"
# Create a new Submission in that topic.
submission_body = {
"attachmentUrl": "http://www.youtube.com/watch?v=1a1wyc5Xxpg",
"attribution": {
"displayName": "Bashan",
"location": "Bainbridge Island, WA"
},
"text": "Charlie Ayers @ Google"
}
submission = service.submissions().insert(seriesId=topic['id']['seriesId'],
topicId=topic['id']['topicId'], body=submission_body).execute()
print "Inserted a new submisson on the topic"
# Vote on that newly added Submission.
vote_body = {
"vote": "PLUS"
}
service.votes().insert(seriesId=topic['id']['seriesId'],
submissionId=submission['id']['submissionId'],
body=vote_body)
print "Voted on the submission"
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""$title
$description
Usage:
$$ python $name.py
You can also get help on all the command-line flags the program understands
by running:
$$ python $name.py --help
To get detailed log output run:
$$ python $name.py --logging_level=DEBUG
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import gflags
import httplib2
import logging
import pprint
import sys
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the API Access tab on
# the Google APIs Console <http://code.google.com/apis/console>. When
# creating credentials for this application be sure to choose an Application
# type of "Installed application".
FLOW = OAuth2WebServerFlow(
client_id='433807057907.apps.googleusercontent.com',
client_secret='jigtZpMApkRxncxikFpR+SFg',
scope='$scope',
user_agent='$name-cmdline-sample/1.0')
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def main(argv):
  # NOTE: this file is a string.Template input; the dollar-prefixed
  # placeholders (name, version, scope, content) are substituted when a
  # concrete sample is generated, so this body is not directly runnable.
  # Let the gflags module process the command-line arguments
  try:
    argv = FLAGS(argv)
  except gflags.FlagsError, e:
    print '%s\\nUsage: %s ARGS\\n%s' % (e, argv[0], FLAGS)
    sys.exit(1)
  # Set the logging according to the command-line flag
  logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
  # If the Credentials don't exist or are invalid run through the native client
  # flow. The Storage object will ensure that if successful the good
  # Credentials will get written back to a file.
  storage = Storage('$name.dat')
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    credentials = run(FLOW, storage)
  # Create an httplib2.Http object to handle our HTTP requests and authorize it
  # with our good Credentials.
  http = httplib2.Http()
  http = credentials.authorize(http)
  service = build("$name", "$version", http=http)
  try:
    $content
  except AccessTokenRefreshError:
    print ("The credentials have been revoked or expired, please re-run"
           "the application to re-authorize")
if __name__ == '__main__':
  main(sys.argv)
| Python |
# version: v1
# title: Command-line sample for the Google URL Shortener API.
# scope: https://www.googleapis.com/auth/urlshortener
# description: Simple command-line example for Google URL Shortener API that shortens a URI then expands it.
url = service.url()
# Shorten a sample link by inserting it into the url collection.
request_body = {"longUrl": "http://code.google.com/apis/urlshortener/" }
resp = url.insert(body=request_body).execute()
pprint.pprint(resp)
short_url = resp['id']
# Expand the shortened link back to the original long URL.
resp = url.get(shortUrl=short_url).execute()
pprint.pprint(resp)
| Python |
# version: v1.2
# scope: https://www.googleapis.com/auth/prediction
# title: Simple command-line sample for the Google Prediction API
# description: Command-line application that trains on some data. This sample does the same thing as the Hello Prediction! example.
# Name of Google Storage bucket/object that contains the training data
OBJECT_NAME = "apiclient-prediction-sample/prediction_models/languages"
# Start training on a data set
train = service.training()
start = train.insert(data=OBJECT_NAME, body={}).execute()
print 'Started training'
pprint.pprint(start)
import time
# Wait for the training to complete
while True:
status = train.get(data=OBJECT_NAME).execute()
pprint.pprint(status)
if 'RUNNING' != status['trainingStatus']:
break
print 'Waiting for training to complete.'
time.sleep(10)
print 'Training is complete'
# Now make a prediction using that training
body = {'input': {'csvInstance': ["mucho bueno"]}}
prediction = service.predict(body=body, data=OBJECT_NAME).execute()
print 'The prediction is:'
pprint.pprint(prediction)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line example for Custom Search.
Command-line application that does a search.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import pprint
from apiclient.discovery import build
def main():
  """Run one Custom Search query and pretty-print the JSON response."""
  # Build a service object for interacting with the API. Visit
  # the Google APIs Console <http://code.google.com/apis/console>
  # to get an API key for your own application.
  service = build("customsearch", "v1",
                  developerKey="AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0")
  results = service.cse().list(
      q='lectures',
      cx='017576662512468239146:omuauf_lfve',
      ).execute()
  pprint.pprint(results)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import os
import pickle
from oauth2client.appengine import CredentialsProperty
from oauth2client.appengine import StorageByKeyName
from oauth2client.client import OAuth2WebServerFlow
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp.util import login_required
# OAuth 2.0 flow for the Dailymotion API: 'read' is Dailymotion's
# read-only scope, and the auth/token endpoints point at Dailymotion
# rather than Google's defaults.
# NOTE(review): client_id/client_secret are hard-coded sample values.
FLOW = OAuth2WebServerFlow(
    client_id='2ad565600216d25d9cde',
    client_secret='03b56df2949a520be6049ff98b89813f17b467dc',
    scope='read',
    user_agent='oauth2client-sample/1.0',
    auth_uri='https://api.dailymotion.com/oauth/authorize',
    token_uri='https://api.dailymotion.com/oauth/token'
    )
class Credentials(db.Model):
  """Datastore entity holding a user's OAuth 2.0 credentials."""
  credentials = CredentialsProperty()
class MainHandler(webapp.RequestHandler):
  """Serves '/': shows the user's Dailymotion data, or starts the OAuth dance."""

  @login_required
  def get(self):
    """Renders welcome.html with API data, or redirects to authorization.

    Credentials are looked up in the datastore keyed by the App Engine
    user id.  If they are missing or invalid, the OAuth 2.0 flow is
    started and the Flow object is pickled into memcache so that
    OAuthHandler can complete the exchange on /auth_return.
    """
    user = users.get_current_user()
    credentials = StorageByKeyName(
        Credentials, user.user_id(), 'credentials').get()
    if credentials is None or credentials.invalid == True:
      # No usable credentials: build the callback URL, stash the flow,
      # and send the user to Dailymotion's authorization page.
      callback = self.request.relative_url('/auth_return')
      authorize_url = FLOW.step1_get_authorize_url(callback)
      memcache.set(user.user_id(), pickle.dumps(FLOW))
      self.redirect(authorize_url)
    else:
      # Credentials look valid: call the Dailymotion API with an
      # authorized http object and render the raw response body.
      http = httplib2.Http()
      http = credentials.authorize(http)
      resp, content = http.request('https://api.dailymotion.com/me')
      path = os.path.join(os.path.dirname(__file__), 'welcome.html')
      logout = users.create_logout_url('/')
      variables = {
          'content': content,
          'logout': logout
          }
      self.response.out.write(template.render(path, variables))
class OAuthHandler(webapp.RequestHandler):
  """Completes the OAuth 2.0 dance on the redirect back from Dailymotion."""

  @login_required
  def get(self):
    """Exchanges the authorization code for credentials and stores them.

    The Flow object that started the dance was pickled into memcache by
    MainHandler.  If memcache has evicted it we cannot complete the
    exchange, so we fall through without redirecting (as before).
    """
    user = users.get_current_user()
    pickled_flow = memcache.get(user.user_id())
    # BUG FIX: check for a cache miss BEFORE unpickling.  The original
    # called pickle.loads(memcache.get(...)) unconditionally, which
    # raises TypeError when memcache returns None.
    if pickled_flow:
      flow = pickle.loads(pickled_flow)
      credentials = flow.step2_exchange(self.request.params)
      StorageByKeyName(
          Credentials, user.user_id(), 'credentials').put(credentials)
      self.redirect("/")
    else:
      pass
def main():
  """WSGI entry point: routes '/' and '/auth_return' to their handlers."""
  routes = [
      ('/', MainHandler),
      ('/auth_return', OAuthHandler),
  ]
  application = webapp.WSGIApplication(routes, debug=True)
  util.run_wsgi_app(application)


if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad clients for an account.
Tags: accounts.adclients.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys

# BUG FIX: gflags is used below (and in main) but was never imported in
# this script, causing a NameError at import time.  Sibling samples in
# this suite (e.g. get_all_url_channels) import it explicitly.
import gflags

from oauth2client.client import AccessTokenRefreshError

import sample_utils

# Largest page the API will return per list() call.
MAX_PAGE_SIZE = 50

# Declare command-line flags, and set them as required.
gflags.DEFINE_string('account_id', None,
                     'The ID of the account for which to get ad clients',
                     short_name='a')
gflags.MarkFlagAsRequired('account_id')
def main(argv):
  """Lists all ad clients for the account given by --account_id."""
  # Process flags and read their values.
  sample_utils.process_flags(argv)
  account_id = gflags.FLAGS.account_id

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  try:
    # Retrieve ad client list in pages and display data as we receive it.
    adclients = service.accounts().adclients()
    request = adclients.list(accountId=account_id,
                             maxResults=MAX_PAGE_SIZE)
    while request is not None:
      result = request.execute()
      # 'items' is omitted from the response when there is nothing to list.
      for ad_client in result.get('items', []):
        print ('Ad client for product "%s" with ID "%s" was found. '
               % (ad_client['productCode'], ad_client['id']))
        print ('\tSupports reporting: %s' %
               (ad_client['supportsReporting'] and 'Yes' or 'No'))
      # BUG FIX: paginate with the same accounts().adclients() collection
      # the request was built from; the original called
      # service.adclients().list_next(), a different resource.
      request = adclients.list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all URL channels in an ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: urlchannels.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('ad_client_id', None,
'The ad client ID for which to get URL channels',
short_name='c')
gflags.MarkFlagAsRequired('ad_client_id')
def main(argv):
  """Lists all URL channels in the ad client given by --ad_client_id."""
  # Process flags and read their values.
  sample_utils.process_flags(argv)
  ad_client_id = gflags.FLAGS.ad_client_id

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  try:
    # Retrieve URL channel list in pages and display data as we receive it.
    urlchannels = service.urlchannels()
    request = urlchannels.list(adClientId=ad_client_id,
                               maxResults=MAX_PAGE_SIZE)
    while request is not None:
      result = request.execute()
      # 'items' is omitted from the response when there is nothing to list.
      # (The original also assigned result['items'] to an unused
      # 'custom_channels' variable -- removed.)
      for url_channel in result.get('items', []):
        print ('URL channel with URL pattern "%s" was found.'
               % url_channel['urlPattern'])
      # BUG FIX: paginate with the urlchannels() collection the request
      # was built from; the original called
      # service.customchannels().list_next(), a different resource.
      request = urlchannels.list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all custom channels an ad unit has been added to.
To get ad clients, run get_all_ad_clients.py. To get ad units, run
get_all_ad_units.py.
Tags: customchannels.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('account_id', None,
'The ID of the account with the specified ad unit',
short_name='a')
gflags.MarkFlagAsRequired('account_id')
gflags.DEFINE_string('ad_client_id', None,
'The ID of the ad client with the specified ad unit',
short_name='c')
gflags.MarkFlagAsRequired('ad_client_id')
gflags.DEFINE_string('ad_unit_id', None,
'The ID of the ad unit for which to get custom channels',
short_name='u')
gflags.MarkFlagAsRequired('ad_unit_id')
def main(argv):
# Process flags and read their values.
sample_utils.process_flags(argv)
account_id = gflags.FLAGS.account_id
ad_client_id = gflags.FLAGS.ad_client_id
ad_unit_id = gflags.FLAGS.ad_unit_id
# Authenticate and construct service.
service = sample_utils.initialize_service()
try:
# Retrieve custom channel list in pages and display data as we receive it.
request = service.accounts().adunits().customchannels().list(
accountId=account_id, adClientId=ad_client_id, adUnitId=ad_unit_id,
maxResults=MAX_PAGE_SIZE)
while request is not None:
result = request.execute()
custom_channels = result['items']
for custom_channel in custom_channels:
print ('Custom channel with code "%s" and name "%s" was found. '
% (custom_channel['code'], custom_channel['name']))
if 'targetingInfo' in custom_channel:
print ' Targeting info:'
targeting_info = custom_channel['targetingInfo']
if 'adsAppearOn' in targeting_info:
print ' Ads appear on: %s' % targeting_info['adsAppearOn']
if 'location' in targeting_info:
print ' Location: %s' % targeting_info['location']
if 'description' in targeting_info:
print ' Description: %s' % targeting_info['description']
if 'siteLanguage' in targeting_info:
print ' Site language: %s' % targeting_info['siteLanguage']
request = service.customchannels().list_next(request, result)
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example retrieves a report for the specified ad client.
Please only use pagination if your application requires it due to memory or
storage constraints.
If you need to retrieve more than 5000 rows, please check generate_report.py, as
due to current limitations you will not be able to use paging for large reports.
To get ad clients, run get_all_ad_clients.py.
Tags: reports.generate
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
# This is the maximum number of obtainable rows for paged reports.
ROW_LIMIT = 5000
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('ad_client_id', None,
'The ID of the ad client for which to generate a report',
short_name='c')
gflags.MarkFlagAsRequired('ad_client_id')
def main(argv):
  """Generates a paged report for the ad client in --ad_client_id.

  Pages of at most MAX_PAGE_SIZE rows are fetched with startIndex /
  maxResults until either all matched rows have been printed or the
  ROW_LIMIT cap on paged reports is reached.
  """
  # Process flags and read their values.
  sample_utils.process_flags(argv)
  ad_client_id = gflags.FLAGS.ad_client_id
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  try:
    # Retrieve report in pages and display data as we receive it.
    start_index = 0
    rows_to_obtain = MAX_PAGE_SIZE
    while True:
      result = service.reports().generate(
          startDate='2011-01-01', endDate='2011-08-31',
          filter=['AD_CLIENT_ID==' + ad_client_id],
          metric=['PAGE_VIEWS', 'AD_REQUESTS', 'AD_REQUESTS_COVERAGE',
                  'CLICKS', 'AD_REQUESTS_CTR', 'COST_PER_CLICK',
                  'AD_REQUESTS_RPM', 'EARNINGS'],
          dimension=['DATE'],
          sort=['+DATE'],
          startIndex=start_index,
          maxResults=rows_to_obtain).execute()
      # If this is the first page, display the headers.
      if start_index == 0:
        for header in result['headers']:
          print '%25s' % header['name'],
        print
      # Display results for this page.
      for row in result['rows']:
        for column in row:
          print '%25s' % column,
        print
      # Advance by the number of rows actually returned (the last page
      # may be short).
      start_index += len(result['rows'])
      # Check to see if we're going to go above the limit and get as many
      # results as we can.
      if start_index + MAX_PAGE_SIZE > ROW_LIMIT:
        rows_to_obtain = ROW_LIMIT - start_index
        if rows_to_obtain <= 0:
          break
      if (start_index >= int(result['totalMatchedRows'])):
        break
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')
if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auxiliary file for AdSense Management API code samples.
Handles various tasks to do with logging, authentication and initialization.
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import logging
import os
import sys
from apiclient.discovery import build
import gflags
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
# Shorthand for the global flag registry used throughout this module.
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
# The read-only AdSense scope is enough for every sample in this suite.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
    scope='https://www.googleapis.com/auth/adsense.readonly',
    message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for applications.
# Run this program with the '--help' argument to see all the flags that it
# understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
                   ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                   'Set the level of logging detail.')
def process_flags(argv):
"""Uses the command-line flags to set the logging level."""
# Let the gflags module process the command-line arguments.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag.
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def prepare_credentials():
  """Handles auth. Reuses credentials if available or runs the auth flow.

  Returns:
    An oauth2client Credentials object, freshly obtained via the OAuth
    flow if the stored one was missing or invalid.
  """
  # If the credentials don't exist or are invalid run through the native client
  # flow. The Storage object will ensure that if successful the good
  # Credentials will get written back to a file.
  storage = Storage('adsense.dat')
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    credentials = run(FLOW, storage)
  return credentials
def retrieve_service(http):
  """Retrieves an AdSense Management API service via the discovery service.

  Args:
    http: httplib2.Http object, already authorized if auth is needed.

  Returns:
    The built AdSense Management API v1.1 service object.
  """
  return build("adsense", "v1.1", http=http)
def initialize_service():
  """Builds instance of service from discovery data and does auth."""
  # Authorize a fresh httplib2.Http with stored (or newly obtained)
  # credentials, then hand it to the discovery-based service builder.
  credentials = prepare_credentials()
  authorized_http = credentials.authorize(httplib2.Http())
  return retrieve_service(authorized_http)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad units in an ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: adunits.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('ad_client_id', None,
'The ad client ID for which to get ad units',
short_name='c')
gflags.MarkFlagAsRequired('ad_client_id')
def main(argv):
  """Prints every ad unit in the ad client given by --ad_client_id."""
  sample_utils.process_flags(argv)
  ad_client_id = gflags.FLAGS.ad_client_id

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  try:
    # Page through the ad unit list, printing each page as it arrives.
    adunits = service.adunits()
    request = adunits.list(adClientId=ad_client_id,
                           maxResults=MAX_PAGE_SIZE)
    while request is not None:
      result = request.execute()
      for ad_unit in result['items']:
        print ('Ad unit with code "%s", name "%s" and status "%s" was found. ' %
               (ad_unit['code'], ad_unit['name'], ad_unit['status']))
      request = adunits.list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad units corresponding to a specified custom channel.
To get custom channels, run get_all_custom_channels.py.
Tags: accounts.customchannels.adunits.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('account_id', None,
'The ID of the account with the specified custom channel',
short_name='a')
gflags.MarkFlagAsRequired('account_id')
gflags.DEFINE_string('ad_client_id', None,
'The ID of the ad client with the specified custom channel',
short_name='c')
gflags.MarkFlagAsRequired('ad_client_id')
gflags.DEFINE_string('custom_channel_id', None,
'The ID of the custom channel for which to get ad units',
short_name='x')
gflags.MarkFlagAsRequired('custom_channel_id')
def main(argv):
  """Lists the ad units in the custom channel identified by the flags."""
  # Process flags and read their values.
  sample_utils.process_flags(argv)
  account_id = gflags.FLAGS.account_id
  ad_client_id = gflags.FLAGS.ad_client_id
  custom_channel_id = gflags.FLAGS.custom_channel_id

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  try:
    # Retrieve ad unit list in pages and display data as we receive it.
    adunits = service.accounts().customchannels().adunits()
    request = adunits.list(
        accountId=account_id, adClientId=ad_client_id,
        customChannelId=custom_channel_id, maxResults=MAX_PAGE_SIZE)
    while request is not None:
      result = request.execute()
      # 'items' is omitted from the response when there is nothing to list.
      for ad_unit in result.get('items', []):
        print ('Ad unit with code "%s", name "%s" and status "%s" was found. ' %
               (ad_unit['code'], ad_unit['name'], ad_unit['status']))
      # BUG FIX: paginate with the same accounts().customchannels().adunits()
      # collection the request was built from; the original called
      # service.adunits().list_next(), a different resource.
      request = adunits.list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets a specific account for the logged in user.
This includes the full tree of sub-accounts.
Tags: accounts.get
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('account_id', None,
'The ID of the account to use as the root of the tree',
short_name='a')
gflags.MarkFlagAsRequired('account_id')
def main(argv):
  """Retrieves the account named by --account_id, with its sub-account tree."""
  sample_utils.process_flags(argv)
  root_account_id = gflags.FLAGS.account_id

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  try:
    # tree=True asks the API to include the full sub-account hierarchy.
    account = service.accounts().get(
        accountId=root_account_id, tree=True).execute()
    if account:
      display_tree(account)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')
def display_tree(account, level=0):
  """Recursively prints an account and its sub-accounts, indented by depth."""
  indent = ' ' * level * 2
  print (indent +
         'Account with ID "%s" and name "%s" was found. ' %
         (account['id'], account['name']))
  # Leaf accounts carry no 'subAccounts' key at all.
  for sub_account in account.get('subAccounts', []):
    display_tree(sub_account, level + 1)
if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example retrieves a report for the specified ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: reports.generate
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('ad_client_id', None,
'The ID of the ad client for which to generate a report',
short_name='c')
gflags.MarkFlagAsRequired('ad_client_id')
def main(argv):
# Process flags and read their values.
sample_utils.process_flags(argv)
ad_client_id = gflags.FLAGS.ad_client_id
# Authenticate and construct service.
service = sample_utils.initialize_service()
try:
# Retrieve report.
result = service.reports().generate(
startDate='2011-01-01', endDate='2011-08-31',
filter=['AD_CLIENT_ID==' + ad_client_id],
metric=['PAGE_VIEWS', 'AD_REQUESTS', 'AD_REQUESTS_COVERAGE',
'CLICKS', 'AD_REQUESTS_CTR', 'COST_PER_CLICK',
'AD_REQUESTS_RPM', 'EARNINGS'],
dimension=['DATE'],
sort=['+DATE']).execute()
# Display headers.
for header in result['headers']:
print '%25s' % header['name'],
print
# Display results.
for row in result['rows']:
for column in row:
print '%25s' % column,
print
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all ad clients for the logged in user's default account.
Tags: adclients.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
def main(argv):
  """Lists every ad client for the authenticated user's default account."""
  sample_utils.process_flags(argv)

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  try:
    # Walk the paginated ad client list, printing each page on arrival.
    adclients = service.adclients()
    request = adclients.list(maxResults=MAX_PAGE_SIZE)
    while request is not None:
      result = request.execute()
      for ad_client in result['items']:
        print ('Ad client for product "%s" with ID "%s" was found. '
               % (ad_client['productCode'], ad_client['id']))
        print ('\tSupports reporting: %s' %
               (ad_client['supportsReporting'] and 'Yes' or 'No'))
      request = adclients.list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all accounts for the logged in user.
Tags: accounts.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
def main(argv):
  """Lists every account visible to the authenticated user."""
  sample_utils.process_flags(argv)

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  try:
    # Walk the paginated account list, printing each page on arrival.
    accounts = service.accounts()
    request = accounts.list(maxResults=MAX_PAGE_SIZE)
    while request is not None:
      result = request.execute()
      for account in result['items']:
        print ('Account with ID "%s" and name "%s" was found. '
               % (account['id'], account['name']))
      request = accounts.list_next(request, result)
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all custom channels in an ad client.
To get ad clients, run get_all_ad_clients.py.
Tags: customchannels.list
"""
__author__ = 'sergio.gomes@google.com (Sergio Gomes)'
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
MAX_PAGE_SIZE = 50
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('ad_client_id', None,
'The ad client ID for which to get custom channels',
short_name='c')
gflags.MarkFlagAsRequired('ad_client_id')
def main(argv):
# Process flags and read their values.
sample_utils.process_flags(argv)
ad_client_id = gflags.FLAGS.ad_client_id
# Authenticate and construct service.
service = sample_utils.initialize_service()
try:
# Retrieve custom channel list in pages and display data as we receive it.
request = service.customchannels().list(adClientId=ad_client_id,
maxResults=MAX_PAGE_SIZE)
while request is not None:
result = request.execute()
custom_channels = result['items']
for custom_channel in custom_channels:
print ('Custom channel with code "%s" and name "%s" was found. '
% (custom_channel['code'], custom_channel['name']))
if 'targetingInfo' in custom_channel:
print ' Targeting info:'
targeting_info = custom_channel['targetingInfo']
if 'adsAppearOn' in targeting_info:
print ' Ads appear on: %s' % targeting_info['adsAppearOn']
if 'location' in targeting_info:
print ' Location: %s' % targeting_info['location']
if 'description' in targeting_info:
print ' Description: %s' % targeting_info['description']
if 'siteLanguage' in targeting_info:
print ' Site language: %s' % targeting_info['siteLanguage']
request = service.customchannels().list_next(request, result)
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample that demonstrates service accounts.
Lists all the Google Task Lists associated with the given service account.
Service accounts are created in the Google API Console. See the documentation
for more information:
https://developers.google.com/console/help/#WhatIsKey
Usage:
$ python tasks.py
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import pprint
import sys
from apiclient.discovery import build
from oauth2client.client import SignedJwtAssertionCredentials
def main(argv):
  """Lists task lists using a service account's signed JWT credentials."""
  # Load the key in PKCS 12 format that you downloaded from the Google API
  # Console when you created your Service account.
  key_file = open('key.p12', 'rb')
  try:
    key = key_file.read()
  finally:
    key_file.close()

  # Create an httplib2.Http object to handle our HTTP requests and authorize it
  # with the Credentials. Note that the first parameter, service_account_name,
  # is the Email address created for the Service account. It must be the email
  # address associated with the key that was created.
  credentials = SignedJwtAssertionCredentials(
      '141491975384@developer.gserviceaccount.com',
      key,
      scope='https://www.googleapis.com/auth/tasks')
  http = credentials.authorize(httplib2.Http())
  service = build("tasks", "v1", http=http)

  # List all the tasklists for the account.
  lists = service.tasklists().list().execute(http)
  pprint.pprint(lists)


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
"""Sample for retrieving event information from GAN."""
__author__ = 'leadpipe@google.com (Luke Blanshard)'
import apiclient
import gflags
import httplib2
import json
import logging
import os
import stat
import sys
from django.conf import settings
from django.template import Template, Context
from django.template.loader import get_template
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,
TEMPLATE_DIRS=('.'))
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = '../client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/gan.readonly',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'DEBUG',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
gflags.DEFINE_enum("output_type", 'STDOUT', ['BOTH', 'HTML', 'STDOUT'],
'Set how to output the results received from the API')
gflags.DEFINE_string('credentials_filename', '../credentials.dat',
'File to store credentials in', short_name='cf')
API_FLAGS = {'eventDateMin':None, 'eventDateMax':None, 'advertiserId':None,
'publisherId':None, 'orderId':None, 'sku':None,
'productCategory':None, 'linkId':None, 'memberId':None,
'status':None, 'type':None, 'role':None, 'roleId':None}
gflags.DEFINE_string(
'eventDateMin', None,
'RFC 3339 formatted min date. Ex: 2005-08-09-T10:57:00-08:00')
gflags.DEFINE_string(
'eventDateMax', None,
'RFC 3339 formatted max date. Ex: 2005-08-09-T10:57:00-08:00')
gflags.DEFINE_string('advertiserId', None,
'caret delimited advertiser IDs')
gflags.DEFINE_string('publisherId', None,
'caret delimited publisher IDs')
gflags.DEFINE_string('orderId', None,
'caret delimited order IDs')
gflags.DEFINE_string('sku', None,
'caret delimited SKUs')
gflags.DEFINE_string('productCategory', None,
'caret delimited product categories')
gflags.DEFINE_string('linkId', None,
'caret delimited link IDs')
gflags.DEFINE_string('memberId', None,
'caret delimited member IDs')
gflags.DEFINE_string('status', None,
'status of events - valid values "active" or "cancelled"')
gflags.DEFINE_string('type', None,
'type of events - valid values "action" or "transaction"')
def usage(argv):
print 'Usage: %s <role> <role-id>\n%s' % (argv[0], FLAGS)
sys.exit(1)
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print e
usage(argv)
if len(argv) != 3:
usage(argv)
params = {
'role': argv[1],
'roleId': argv[2]
}
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage(FLAGS.credentials_filename)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('gan', 'v1beta1', http=http)
events = service.events()
# Filter out all params that aren't set.
for key in FLAGS:
if key in API_FLAGS and FLAGS[key].value != None:
params[key] = FLAGS[key].value
# Retrieve the relevant events.
all_items = []
try:
request = events.list(**params)
while request:
response = request.execute()
if FLAGS.output_type in ["BOTH", "STDOUT"]:
print json.dumps(response, sort_keys=True, indent=4)
all_items.extend(response['items'])
request = events.list_next(request, response)
except apiclient.errors.HttpError, e:
print json.dumps(e.__dict__, sort_keys=True, indent=4)
if FLAGS.output_type in ["BOTH", "HTML"]:
template = get_template('events_template.html')
context = Context({'items':items})
out = open("output.html", 'w')
out.write(template.render(context).encode('UTF-8'))
os.fchmod(out.fileno(), stat.S_IROTH|stat.S_IRGRP|stat.S_IRUSR|stat.S_IWUSR)
out.close()
print 'Wrote output.html'
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
"""Sample for retrieving publisher information from GAN."""
__author__ = 'leadpipe@google.com (Luke Blanshard)'
import apiclient
import gflags
import httplib2
import json
import logging
import os
import stat
import sys
from django.conf import settings
from django.template import Template, Context
from django.template.loader import get_template
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,
TEMPLATE_DIRS=('.'))
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = '../client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/gan.readonly',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'DEBUG',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
gflags.DEFINE_enum("request_type", 'LIST', ['GET', 'LIST'],
'Type of request to be made')
gflags.DEFINE_enum("output_type", 'STDOUT', ['BOTH', 'HTML', 'STDOUT'],
'Set how to output the results received from the API')
gflags.DEFINE_string('credentials_filename', '../credentials.dat',
'File to store credentials in', short_name='cf')
API_FLAGS = {'relationshipStatus':None, 'publisherId':None, 'role':None,
'roleId':None, 'category':None, 'minSevenDayEpc':None,
'minNinetyDayEpc':None, 'minPayoutRank':None}
gflags.DEFINE_enum(
'relationshipStatus', None,
['APPROVED', 'AVAILABLE', 'PENDING', 'DECLINED', 'DEACTIVATED'],
'Status of the relationship')
gflags.DEFINE_string(
'publisherId', None,
'Publisher ID to lookup (get requests only).')
gflags.DEFINE_string('category', None,
'Caret delimited set of publisher categories to include'
+ ' (list requests only).')
gflags.DEFINE_string('minSevenDayEpc', None,
'Minimum value for the publisher\'s seven day EPC'
+ ' (list requests only).')
gflags.DEFINE_string('minNinetyDayEpc', None,
'Minimum value for the publisher\'s ninety day EPC'
+ ' (list requests only).')
gflags.DEFINE_enum('minPayoutRank', None, ['1', '2', '3', '4'],
'Minimum value for the publisher\'s payout rank'
+ ' (list requests only)')
def usage(argv):
print 'Usage: %s <role> <role-id>\n%s' % (argv[0], FLAGS)
sys.exit(1)
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print e
usage(argv)
if len(argv) != 3:
usage(argv)
params = {
'role': argv[1],
'roleId': argv[2]
}
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage(FLAGS.credentials_filename)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('gan', 'v1beta1', http=http)
publishers = service.publishers()
# Filter out all params that aren't set.
for key in FLAGS:
if key in API_FLAGS and FLAGS[key].value != None:
params[key] = FLAGS[key].value
# Retrieve the relevant publishers.
stdout = {}
html = {}
try:
if FLAGS.request_type == "GET":
get_call = publishers.get(**params)
stdout = get_call.execute()
html['items'] = [stdout['item']]
else:
all_items = []
request = publishers.list(**params)
while request:
response = request.execute()
if 'items' in response:
all_items.extend(response['items'])
request = publishers.list_next(request, response)
html['items'] = all_items
stdout = html
except apiclient.errors.HttpError, e:
print json.dumps(e.__dict__, sort_keys=True, indent=4)
if FLAGS.output_type in ["BOTH", "HTML"]:
template = get_template('publishers_template.html')
context = Context(html)
out = open("output.html", 'w')
out.write(template.render(context).encode('UTF-8'))
os.fchmod(out.fileno(), stat.S_IROTH|stat.S_IRGRP|stat.S_IRUSR|stat.S_IWUSR)
out.close()
print 'Wrote output.html'
if FLAGS.output_type in ["BOTH", "STDOUT"]:
print json.dumps(stdout, sort_keys=True, indent=4)
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
"""Sample for retrieving advertiser information from GAN."""
__author__ = 'leadpipe@google.com (Luke Blanshard)'
import apiclient
import gflags
import httplib2
import json
import logging
import os
import stat
import sys
from django.conf import settings
from django.template import Template, Context
from django.template.loader import get_template
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,
TEMPLATE_DIRS=('.'))
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = '../client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/gan.readonly',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'DEBUG',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
gflags.DEFINE_enum("request_type", 'LIST', ['GET', 'LIST'],
'Type of request to be made')
gflags.DEFINE_enum("output_type", 'STDOUT', ['BOTH', 'HTML', 'STDOUT'],
'Set how to output the results received from the API')
gflags.DEFINE_string('credentials_filename', '../credentials.dat',
'File to store credentials in', short_name='cf')
API_FLAGS = {'relationshipStatus':None, 'advertiserId':None, 'role':None,
'roleId':None, 'category':None, 'minSevenDayEpc':None,
'minNinetyDayEpc':None, 'minPayoutRank':None}
gflags.DEFINE_enum(
'relationshipStatus', None,
['APPROVED', 'AVAILABLE', 'PENDING', 'DECLINED', 'DEACTIVATED'],
'Status of the relationship')
gflags.DEFINE_string(
'advertiserId', None,
'Advertiser ID to lookup (get requests only).')
gflags.DEFINE_string('category', None,
'Caret delimited set of advertiser categories to include'
+ ' (list requests only).')
gflags.DEFINE_string('minSevenDayEpc', None,
'Minimum value for the advertiser\'s seven day EPC'
+ ' (list requests only).')
gflags.DEFINE_string('minNinetyDayEpc', None,
'Minimum value for the advertiser\'s ninety day EPC'
+ ' (list requests only).')
gflags.DEFINE_enum('minPayoutRank', None, ['1', '2', '3', '4'],
'Minimum value for the advertiser\'s payout rank'
+ ' (list requests only)')
def usage(argv):
print 'Usage: %s <role> <role-id>\n%s' % (argv[0], FLAGS)
sys.exit(1)
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print e
usage(argv)
if len(argv) != 3:
usage(argv)
params = {
'role': argv[1],
'roleId': argv[2]
}
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage(FLAGS.credentials_filename)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('gan', 'v1beta1', http=http)
advertisers = service.advertisers()
# Filter out all params that aren't set.
for key in FLAGS:
if key in API_FLAGS and FLAGS[key].value != None:
params[key] = FLAGS[key].value
# Retrieve the relevant advertisers.
stdout = {}
html = {}
try:
if FLAGS.request_type == "GET":
get_call = advertisers.get(**params)
stdout = get_call.execute()
html['items'] = [stdout['item']]
else:
all_items = []
request = advertisers.list(**params)
while request:
response = request.execute()
if 'items' in response:
all_items.extend(response['items'])
request = advertisers.list_next(request, response)
html['items'] = all_items
stdout = html
except apiclient.errors.HttpError, e:
print json.dumps(e.__dict__, sort_keys=True, indent=4)
if FLAGS.output_type in ["BOTH", "HTML"]:
template = get_template('advertisers_template.html')
context = Context(html)
out = open("output.html", 'w')
out.write(template.render(context).encode('UTF-8'))
os.fchmod(out.fileno(), stat.S_IROTH|stat.S_IRGRP|stat.S_IRUSR|stat.S_IWUSR)
out.close()
print 'Wrote output.html'
if FLAGS.output_type in ["BOTH", "STDOUT"]:
print json.dumps(stdout, sort_keys=True, indent=4)
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
"""Sample for retrieving credit-card offers from GAN."""
__author__ = 'leadpipe@google.com (Luke Blanshard)'
import gflags
import httplib2
import json
import logging
import os
import stat
import sys
from django.conf import settings
from django.template import Template, Context
from django.template.loader import get_template
from apiclient.discovery import build
from oauth2client.file import Storage
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import flow_from_clientsecrets
from oauth2client.tools import run
settings.configure(DEBUG=True, TEMPLATE_DEBUG=True,
TEMPLATE_DIRS=('.'))
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = '../client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/gan.readonly',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'DEBUG',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
gflags.DEFINE_enum("output_type", 'STDOUT', ['BOTH', 'HTML', 'STDOUT'],
'Set how to output the results received from the API')
gflags.DEFINE_string('credentials_filename', '../credentials.dat',
'File to store credentials in', short_name='cf')
gflags.DEFINE_multistring('advertiser', [],
'If given, advertiser we should run as')
def usage(argv):
print 'Usage: %s <publisher-id>\n%s' % (argv[0], FLAGS)
sys.exit(1)
def main(argv):
# Let the gflags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
raise e
usage(argv)
if len(argv) != 2:
usage(argv)
publisher = argv[1]
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage(FLAGS.credentials_filename)
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('gan', 'v1beta1', http=http)
ccOffers = service.ccOffers()
# Retrieve the relevant offers.
request = ccOffers.list(publisher=publisher,
advertiser=FLAGS.advertiser,
projection='full')
response = request.execute()
response['publisher'] = publisher
if FLAGS.output_type in ["BOTH", "HTML"]:
template = get_template('offers_template.html')
context = Context(response)
fname = '%s.html' % publisher
out = open(fname, 'w')
out.write(template.render(context).encode('UTF-8'))
os.fchmod(out.fileno(), stat.S_IROTH|stat.S_IRGRP|stat.S_IRUSR|stat.S_IWUSR)
out.close()
print 'Wrote %s' % fname
if FLAGS.output_type in ["BOTH", "STDOUT"]:
print json.dumps(response, sort_keys=True, indent=4)
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line sample for Audit API.
Command-line application that retrieves events through the Audit API.
This works only for Google Apps for Business, Education, and ISP accounts.
It can not be used for the basic Google Apps product.
Usage:
$ python audit.py
You can also get help on all the command-line flags the program understands
by running:
$ python audit.py --help
To get detailed log output run:
$ python audit.py --logging_level=DEBUG
"""
__author__ = 'rahulpaul@google.com (Rahul Paul)'
import gflags
import httplib2
import logging
import re
import simplejson
import sys
from apiclient.discovery import build
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# When creating credentials for this application be sure to choose an
# Application type of 'Installed application'.
FLOW = OAuth2WebServerFlow(
client_id='880851855448.apps.googleusercontent.com',
client_secret='d8nBjlNBpOMH_LITqz31IMdI',
scope='https://www.googleapis.com/auth/apps/reporting/audit.readonly',
user_agent='audit-cmdline-sample/1.0')
# The flags module makes defining command-line options easy for
# applications. Run this program with the '--help' argument to see
# all the flags that it understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def print_activities(activity_list):
events = activity_list['items']
print '\nRetrieved %d activities.' % len(events)
for i in range(len(events)):
print '\nEvent %d : %s' % (i, simplejson.JSONEncoder().encode(events[i]))
print '\nNext URL : %s' % (activity_list['next'])
print '======================================================================'
def main(argv):
# Let the flags module process the command-line arguments
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
# If the Credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('audit.dat')
credentials = storage.get()
if not credentials or credentials.invalid:
credentials = run(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http()
http = credentials.authorize(http)
service = build('audit', 'v1', http=http)
try:
activities = service.activities()
# Retrieve the first two activities
print 'Retrieving the first 2 activities...'
activity_list = activities.list(
applicationId='207535951991', customerId='C01rv1wm7', maxResults='2',
actorEmail='admin@enterprise-audit-clientlib.com').execute()
print_activities(activity_list)
# Now retrieve the next 2 events
match = re.search('(?<=continuationToken=).+$', activity_list['next'])
if match is not None:
next_token = match.group(0)
print '\nRetrieving the next 2 activities...'
activity_list = activities.list(
applicationId='207535951991', customerId='C01rv1wm7',
maxResults='2', actorEmail='admin@enterprise-audit-clientlib.com',
continuationToken=next_token).execute()
print_activities(activity_list)
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run'
'the application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Analytics API code samples.
Handles various tasks to do with logging, authentication and initialization.
Mostly taken from Sergio :)
Before You Begin:
You must update the client_secrets.json file with a client id, client secret,
and the redirect uri. You get these values by creating a new project
in the Google APIs console and registering for OAuth2.0 for installed
applications: https://code.google.com/apis/console
Also all OAuth2.0 tokens are stored for reuse in the file specified
as TOKEN_FILE_NAME. You can modify this file name if you wish.
"""
__author__ = ('sergio.gomes@google.com (Sergio Gomes)'
'api.nickm@gmail.com (Nick Mihailovski)')
import logging
import os
import sys
from apiclient.discovery import build
import gflags
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret. You get these values by
# creating a new project in the Google APIs console and registering for
# OAuth2.0 for installed applications: <https://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/analytics.readonly',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for applications.
# Run this program with the '--help' argument to see all the flags that it
# understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
# Name of file that will store the access and refresh tokens to access
# the API without having to login each time. Make sure this file is in
# a secure place.
TOKEN_FILE_NAME = 'analytics.dat'
def process_flags(argv):
"""Uses the command-line flags to set the logging level.
Args:
argv: List of command line arguments passed to the python script.
"""
# Let the gflags module process the command-line arguments.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag.
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def initialize_service():
  """Returns an authorized Analytics v3 service object.

  Reads stored OAuth 2.0 credentials from TOKEN_FILE_NAME, running the
  authorization flow if they are missing or invalid. The credentials are
  then used to authorize an HTTP object over which the service is built.

  Returns:
    An analytics v3 service object.
  """
  # Prepare credentials, re-authorizing if necessary; Storage persists any
  # newly obtained tokens back to TOKEN_FILE_NAME.
  token_store = Storage(TOKEN_FILE_NAME)
  credentials = token_store.get()
  if credentials is None or credentials.invalid:
    credentials = run(FLOW, token_store)

  # Authorize an HTTP object and build the service from discovery data.
  authorized_http = credentials.authorize(httplib2.Http())
  return build('analytics', 'v3', http=authorized_http)
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reference command-line example for Google Analytics Core Reporting API v3.
This application demonstrates how to use the python client library to access
all the pieces of data returned by the Google Analytics Core Reporting API v3.
The application manages authorization by saving an OAuth2.0 token in a local
file and reusing the token for subsequent requests.
Before You Begin:
Update the client_secrets.json file
You must update the clients_secrets.json file with a client id, client
secret, and the redirect uri. You get these values by creating a new project
in the Google APIs console and registering for OAuth2.0 for installed
applications: https://code.google.com/apis/console
Learn more about registering your analytics application here:
http://code.google.com/apis/analytics/docs/gdata/v3/gdataAuthorization.html
Supply your TABLE_ID
You will also need to identify from which profile to access data by
specifying the TABLE_ID constant below. This value is of the form: ga:xxxx
where xxxx is the profile ID. You can get the profile ID by either querying
the Management API or by looking it up in the account settings of the
Google Analytics web interface.
Sample Usage:
$ python core_reporting_v3_reference.py
Also you can also get help on all the command-line flags the program
understands by running:
$ python core_reporting_v3_reference.py --help
"""
__author__ = 'api.nickm@gmail.com (Nick Mihailovski)'
import sys
import sample_utils
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
# The table ID is used to identify from which Google Analytics profile
# to retrieve data. This ID is in the format ga:xxxx where xxxx is the
# profile ID.
TABLE_ID = 'INSERT_YOUR_TABLE_ID_HERE'
def main(argv):
sample_utils.process_flags(argv)
# Authenticate and construct service.
service = sample_utils.initialize_service()
# Try to make a request to the API. Print the results or handle errors.
try:
results = get_api_query(service).execute()
print_results(results)
except TypeError, error:
# Handle errors in constructing a query.
print ('There was an error in constructing your query : %s' % error)
except HttpError, error:
# Handle API errors.
print ('Arg, there was an API error : %s : %s' %
(error.resp.status, error._get_reason()))
except AccessTokenRefreshError:
# Handle Auth errors.
print ('The credentials have been revoked or expired, please re-run '
'the application to re-authorize')
def get_api_query(service):
  """Returns a query object to retrieve data from the Core Reporting API.

  Args:
    service: The service object built by the Google API Python client library.
  """
  # All query parameters for the reference request, gathered in one place.
  query_params = {
      'ids': TABLE_ID,
      'start_date': '2012-01-01',
      'end_date': '2012-01-15',
      'metrics': 'ga:visits',
      'dimensions': 'ga:source,ga:keyword',
      'sort': '-ga:visits',
      'filters': 'ga:medium==organic',
      'start_index': '1',
      'max_results': '25',
  }
  return service.data().ga().get(**query_params)
def print_results(results):
  """Prints all the results in the Core Reporting API Response.

  Args:
    results: The response returned from the Core Reporting API.
  """
  # Each printer below handles one section of the response, in order.
  section_printers = (
      print_report_info,
      print_pagination_info,
      print_profile_info,
      print_query,
      print_column_headers,
      print_totals_for_all_results,
      print_rows,
  )
  for print_section in section_printers:
    print_section(results)
def print_report_info(results):
"""Prints general information about this report.
Args:
results: The response returned from the Core Reporting API.
"""
print 'Report Infos:'
print 'Contains Sampled Data = %s' % results.get('containsSampledData')
print 'Kind = %s' % results.get('kind')
print 'ID = %s' % results.get('id')
print 'Self Link = %s' % results.get('selfLink')
print
def print_pagination_info(results):
"""Prints common pagination details.
Args:
results: The response returned from the Core Reporting API.
"""
print 'Pagination Infos:'
print 'Items per page = %s' % results.get('itemsPerPage')
print 'Total Results = %s' % results.get('totalResults')
# These only have values if other result pages exist.
if results.get('previousLink'):
print 'Previous Link = %s' % results.get('previousLink')
if results.get('nextLink'):
print 'Next Link = %s' % results.get('nextLink')
print
def print_profile_info(results):
"""Prints information about the profile.
Args:
results: The response returned from the Core Reporting API.
"""
print 'Profile Infos:'
info = results.get('profileInfo')
print 'Account Id = %s' % info.get('accountId')
print 'Web Property Id = %s' % info.get('webPropertyId')
print 'Profile Id = %s' % info.get('profileId')
print 'Table Id = %s' % info.get('tableId')
print 'Profile Name = %s' % info.get('profileName')
print
def print_query(results):
"""The query returns the original report query as a dict.
Args:
results: The response returned from the Core Reporting API.
"""
print 'Query Parameters:'
query = results.get('query')
for key, value in query.iteritems():
print '%s = %s' % (key, value)
print
def print_column_headers(results):
"""Prints the information for each column.
The main data from the API is returned as rows of data. The column
headers describe the names and types of each column in rows.
Args:
results: The response returned from the Core Reporting API.
"""
print 'Column Headers:'
headers = results.get('columnHeaders')
for header in headers:
# Print Dimension or Metric name.
print '\t%s name: = %s' % (header.get('columnType').title(),
header.get('name'))
print '\tColumn Type = %s' % header.get('columnType')
print '\tData Type = %s' % header.get('dataType')
print
def print_totals_for_all_results(results):
"""Prints the total metric value for all pages the query matched.
Args:
results: The response returned from the Core Reporting API.
"""
print 'Total Metrics For All Results:'
print 'This query returned %s rows.' % len(results.get('rows'))
print ('But the query matched %s total results.' %
results.get('totalResults'))
print 'Here are the metric totals for the matched total results.'
totals = results.get('totalsForAllResults')
for metric_name, metric_total in totals.iteritems():
print 'Metric Name = %s' % metric_name
print 'Metric Total = %s' % metric_total
print
def print_rows(results):
"""Prints all the rows of data returned by the API.
Args:
results: The response returned from the Core Reporting API.
"""
print 'Rows:'
if results.get('rows', []):
for row in results.get('rows'):
print '\t'.join(row)
else:
print 'No Rows Found'
# Run the sample when executed as a script (not when imported).
if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reference command-line example for Google Analytics Management API v3.
This application demonstrates how to use the python client library to access
all the pieces of data returned by the Google Analytics Management API v3.
The application manages authorization by saving an OAuth2.0 token in a local
file and reusing the token for subsequent requests. It then traverses the
Google Analytics Management hierarchy. It first retrieves and prints all the
authorized user's accounts, next it prints all the web properties for the
first account, then all the profiles for the first web property and finally
all the goals for the first profile. The sample then prints all the
user's advanced segments.
To read an indepth discussion on how this file works, check out the Management
API Python Getting Started guide here:
http://code.google.com/apis/analytics/docs/mgmt/v3/mgmtPython.html
Before You Begin:
Update the client_secrets.json file
You must update the clients_secrets.json file with a client id, client
secret, and the redirect uri. You get these values by creating a new project
in the Google APIs console and registering for OAuth2.0 for installed
applications: https://code.google.com/apis/console
Learn more about registering your analytics application here:
http://code.google.com/apis/analytics/docs/gdata/v3/gdataAuthorization.html
Sample Usage:
$ python management_v3_reference.py
Also you can also get help on all the command-line flags the program
understands by running:
$ python management_v3_reference.py --help
"""
__author__ = 'api.nickm@gmail.com (Nick Mihailovski)'
import sys
import sample_utils
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
def main(argv):
sample_utils.process_flags(argv)
# Authenticate and construct service.
service = sample_utils.initialize_service()
# Traverse the Management hiearchy and print results or handle errors.
try:
traverse_hiearchy(service)
except TypeError, error:
# Handle errors in constructing a query.
print ('There was an error in constructing your query : %s' % error)
except HttpError, error:
# Handle API errors.
print ('Arg, there was an API error : %s : %s' %
(error.resp.status, error._get_reason()))
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run'
'the application to re-authorize')
def traverse_hiearchy(service):
  """Traverses the management API hiearchy and prints results.

  This retrieves and prints the authorized user's accounts. It then
  retrieves and prints all the web properties for the first account,
  retrieves and prints all the profiles for the first web property,
  and retrieves and prints all the goals for the first profile.
  Finally, it prints all the user's advanced segments.

  Args:
    service: The service object built by the Google API Python client library.

  Raises:
    HttpError: If an error occured when accessing the API.
    AccessTokenRefreshError: If the current token was invalid.
  """
  accounts = service.management().accounts().list().execute()
  print_accounts(accounts)

  # Each lower level of the hierarchy needs the IDs of all its ancestors,
  # so only descend when the parent collection returned at least one item.
  if accounts.get('items'):
    firstAccountId = accounts.get('items')[0].get('id')
    webproperties = service.management().webproperties().list(
        accountId=firstAccountId).execute()
    print_webproperties(webproperties)

    if webproperties.get('items'):
      firstWebpropertyId = webproperties.get('items')[0].get('id')
      profiles = service.management().profiles().list(
          accountId=firstAccountId,
          webPropertyId=firstWebpropertyId).execute()
      print_profiles(profiles)

      if profiles.get('items'):
        firstProfileId = profiles.get('items')[0].get('id')
        goals = service.management().goals().list(
            accountId=firstAccountId,
            webPropertyId=firstWebpropertyId,
            profileId=firstProfileId).execute()
        print_goals(goals)

  # Segments are not tied to an account, so they are always printed.
  print_segments(service.management().segments().list().execute())
def print_accounts(accounts_response):
"""Prints all the account info in the Accounts Collection.
Args:
accounts_response: The response object returned from querying the Accounts
collection.
"""
print '------ Account Collection -------'
print_pagination_info(accounts_response)
print
for account in accounts_response.get('items', []):
print 'Account ID = %s' % account.get('id')
print 'Kind = %s' % account.get('kind')
print 'Self Link = %s' % account.get('selfLink')
print 'Account Name = %s' % account.get('name')
print 'Created = %s' % account.get('created')
print 'Updated = %s' % account.get('updated')
child_link = account.get('childLink')
print 'Child link href = %s' % child_link.get('href')
print 'Child link type = %s' % child_link.get('type')
print
else:
print 'No accounts found.\n'
def print_webproperties(webproperties_response):
"""Prints all the web property info in the WebProperties collection.
Args:
webproperties_response: The response object returned from querying the
Webproperties collection.
"""
print '------ Web Properties Collection -------'
print_pagination_info(webproperties_response)
print
for webproperty in webproperties_response.get('items', []):
print 'Kind = %s' % webproperty.get('kind')
print 'Account ID = %s' % webproperty.get('accountId')
print 'Web Property ID = %s' % webproperty.get('id')
print ('Internal Web Property ID = %s' %
webproperty.get('internalWebPropertyId'))
print 'Website URL = %s' % webproperty.get('websiteUrl')
print 'Created = %s' % webproperty.get('created')
print 'Updated = %s' % webproperty.get('updated')
print 'Self Link = %s' % webproperty.get('selfLink')
parent_link = webproperty.get('parentLink')
print 'Parent link href = %s' % parent_link.get('href')
print 'Parent link type = %s' % parent_link.get('type')
child_link = webproperty.get('childLink')
print 'Child link href = %s' % child_link.get('href')
print 'Child link type = %s' % child_link.get('type')
print
else:
print 'No webproperties found.\n'
def print_profiles(profiles_response):
"""Prints all the profile info in the Profiles Collection.
Args:
profiles_response: The response object returned from querying the
Profiles collection.
"""
print '------ Profiles Collection -------'
print_pagination_info(profiles_response)
print
for profile in profiles_response.get('items', []):
print 'Kind = %s' % profile.get('kind')
print 'Account ID = %s' % profile.get('accountId')
print 'Web Property ID = %s' % profile.get('webPropertyId')
print ('Internal Web Property ID = %s' %
profile.get('internalWebPropertyId'))
print 'Profile ID = %s' % profile.get('id')
print 'Profile Name = %s' % profile.get('name')
print 'Currency = %s' % profile.get('currency')
print 'Timezone = %s' % profile.get('timezone')
print 'Default Page = %s' % profile.get('defaultPage')
print ('Exclude Query Parameters = %s' %
profile.get('excludeQueryParameters'))
print ('Site Search Category Parameters = %s' %
profile.get('siteSearchCategoryParameters'))
print ('Site Search Query Parameters = %s' %
profile.get('siteSearchQueryParameters'))
print 'Created = %s' % profile.get('created')
print 'Updated = %s' % profile.get('updated')
print 'Self Link = %s' % profile.get('selfLink')
parent_link = profile.get('parentLink')
print 'Parent link href = %s' % parent_link.get('href')
print 'Parent link type = %s' % parent_link.get('type')
child_link = profile.get('childLink')
print 'Child link href = %s' % child_link.get('href')
print 'Child link type = %s' % child_link.get('type')
print
else:
print 'No profiles found.\n'
def print_goals(goals_response):
"""Prints all the goal info in the Goals collection.
Args:
goals_response: The response object returned from querying the Goals
collection
"""
print '------ Goals Collection -------'
print_pagination_info(goals_response)
print
for goal in goals_response.get('items', []):
print 'Goal ID = %s' % goal.get('id')
print 'Kind = %s' % goal.get('kind')
print 'Self Link = %s' % goal.get('selfLink')
print 'Account ID = %s' % goal.get('accountId')
print 'Web Property ID = %s' % goal.get('webPropertyId')
print ('Internal Web Property ID = %s' %
goal.get('internalWebPropertyId'))
print 'Profile ID = %s' % goal.get('profileId')
print 'Goal Name = %s' % goal.get('name')
print 'Goal Value = %s' % goal.get('value')
print 'Goal Active = %s' % goal.get('active')
print 'Goal Type = %s' % goal.get('type')
print 'Created = %s' % goal.get('created')
print 'Updated = %s' % goal.get('updated')
parent_link = goal.get('parentLink')
print 'Parent link href = %s' % parent_link.get('href')
print 'Parent link type = %s' % parent_link.get('type')
# Print the goal details depending on the type of goal.
if goal.get('urlDestinationDetails'):
print_url_destination_goal_details(
goal.get('urlDestinationDetails'))
elif goal.get('visitTimeOnSiteDetails'):
print_visit_time_on_site_goal_details(
goal.get('visitTimeOnSiteDetails'))
elif goal.get('visitNumPagesDetails'):
print_visit_num_pages_goal_details(
goal.get('visitNumPagesDetails'))
elif goal.get('eventDetails'):
print_event_goal_details(goal.get('eventDetails'))
print
else:
print 'No goals found.\n'
def print_url_destination_goal_details(goal_details):
"""Prints all the URL Destination goal type info.
Args:
goal_details: The details portion of the goal response.
"""
print '------ Url Destination Goal -------'
print 'Goal URL = %s' % goal_details.get('url')
print 'Case Sensitive = %s' % goal_details.get('caseSensitive')
print 'Match Type = %s' % goal_details.get('matchType')
print 'First Step Required = %s' % goal_details.get('firstStepRequired')
print '------ Url Destination Goal Steps -------'
for goal_step in goal_details.get('steps', []):
print 'Step Number = %s' % goal_step.get('number')
print 'Step Name = %s' % goal_step.get('name')
print 'Step URL = %s' % goal_step.get('url')
else:
print 'No Steps Configured'
def print_visit_time_on_site_goal_details(goal_details):
"""Prints all the Visit Time On Site goal type info.
Args:
goal_details: The details portion of the goal response.
"""
print '------ Visit Time On Site Goal -------'
print 'Comparison Type = %s' % goal_details.get('comparisonType')
print 'comparison Value = %s' % goal_details.get('comparisonValue')
def print_visit_num_pages_goal_details(goal_details):
"""Prints all the Visit Num Pages goal type info.
Args:
goal_details: The details portion of the goal response.
"""
print '------ Visit Num Pages Goal -------'
print 'Comparison Type = %s' % goal_details.get('comparisonType')
print 'comparison Value = %s' % goal_details.get('comparisonValue')
def print_event_goal_details(goal_details):
"""Prints all the Event goal type info.
Args:
goal_details: The details portion of the goal response.
"""
print '------ Event Goal -------'
print 'Use Event Value = %s' % goal_details.get('useEventValue')
for event_condition in goal_details.get('eventConditions', []):
event_type = event_condition.get('type')
print 'Type = %s' % event_type
if event_type in ('CATEGORY', 'ACTION', 'LABEL'):
print 'Match Type = %s' % event_condition.get('matchType')
print 'Expression = %s' % event_condition.get('expression')
else: # VALUE type.
print 'Comparison Type = %s' % event_condition.get('comparisonType')
print 'Comparison Value = %s' % event_condition.get('comparisonValue')
def print_segments(segments_response):
"""Prints all the segment info in the Segments collection.
Args:
segments_response: The response object returned from querying the
Segments collection.
"""
print '------ Segments Collection -------'
print_pagination_info(segments_response)
print
for segment in segments_response.get('items', []):
print 'Segment ID = %s' % segment.get('id')
print 'Kind = %s' % segment.get('kind')
print 'Self Link = %s' % segment.get('selfLink')
print 'Name = %s' % segment.get('name')
print 'Definition = %s' % segment.get('definition')
print 'Created = %s' % segment.get('created')
print 'Updated = %s' % segment.get('updated')
print
def print_pagination_info(management_response):
"""Prints common pagination details.
Args:
management_response: The common reponse object for each collection in the
Management API.
"""
print 'Items per page = %s' % management_response.get('itemsPerPage')
print 'Total Results = %s' % management_response.get('totalResults')
print 'Start Index = %s' % management_response.get('startIndex')
# These only have values if other result pages exist.
if management_response.get('previousLink'):
print 'Previous Link = %s' % management_response.get('previousLink')
if management_response.get('nextLink'):
print 'Next Link = %s' % management_response.get('nextLink')
# Run the sample when executed as a script (not when imported).
if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple intro to using the Google Analytics API v3.
This application demonstrates how to use the python client library to access
Google Analytics data. The sample traverses the Management API to obtain the
authorized user's first profile ID. Then the sample uses this ID to
construct a Core Reporting API query to return the top 25 organic search
terms.
Before you begin, you must sign up for a new project in the Google APIs console:
https://code.google.com/apis/console
Then register the project to use OAuth2.0 for installed applications.
Finally you will need to add the client id, client secret, and redirect URL
into the client_secrets.json file that is in the same directory as this sample.
Sample Usage:
$ python hello_analytics_api_v3.py
Also you can also get help on all the command-line flags the program
understands by running:
$ python hello_analytics_api_v3.py --help
"""
__author__ = 'api.nickm@gmail.com (Nick Mihailovski)'
import sys
import sample_utils
from apiclient.errors import HttpError
from oauth2client.client import AccessTokenRefreshError
def main(argv):
  """Entry point: finds the user's first profile and prints top keywords.

  Args:
    argv: list of strings, the command-line arguments (handed to
        sample_utils for flag processing).
  """
  sample_utils.process_flags(argv)

  # Authenticate and construct service.
  service = sample_utils.initialize_service()

  # Try to make a request to the API. Print the results or handle errors.
  try:
    first_profile_id = get_first_profile_id(service)
    if not first_profile_id:
      print 'Could not find a valid profile for this user.'
    else:
      results = get_top_keywords(service, first_profile_id)
      print_results(results)

  except TypeError, error:
    # Handle errors in constructing a query.
    print ('There was an error in constructing your query : %s' % error)

  except HttpError, error:
    # Handle API errors.
    print ('Arg, there was an API error : %s : %s' %
           (error.resp.status, error._get_reason()))

  except AccessTokenRefreshError:
    # Handle Auth errors.
    print ('The credentials have been revoked or expired, please re-run '
           'the application to re-authorize')
def get_first_profile_id(service):
  """Traverses Management API to return the first profile id.

  This first queries the Accounts collection to get the first account ID.
  This ID is used to query the Webproperties collection to retrieve the first
  webproperty ID. And both account and webproperty IDs are used to query the
  Profile collection to get the first profile id.

  Args:
    service: The service object built by the Google API Python client library.

  Returns:
    A string with the first profile ID. None if a user does not have any
    accounts, webproperties, or profiles.
  """
  accounts = service.management().accounts().list().execute()
  account_items = accounts.get('items')
  if not account_items:
    return None
  first_account_id = account_items[0].get('id')

  webproperties = service.management().webproperties().list(
      accountId=first_account_id).execute()
  webproperty_items = webproperties.get('items')
  if not webproperty_items:
    return None
  first_webproperty_id = webproperty_items[0].get('id')

  profiles = service.management().profiles().list(
      accountId=first_account_id,
      webPropertyId=first_webproperty_id).execute()
  profile_items = profiles.get('items')
  if not profile_items:
    return None
  return profile_items[0].get('id')
def get_top_keywords(service, profile_id):
  """Executes and returns data from the Core Reporting API.

  This queries the API for the top 25 organic search terms by visits.

  Args:
    service: The service object built by the Google API Python client library.
    profile_id: String The profile ID from which to retrieve analytics data.

  Returns:
    The response returned from the Core Reporting API.
  """
  # Collect the report parameters in one place, then issue the request.
  query_params = {
      'ids': 'ga:' + profile_id,
      'start_date': '2012-01-01',
      'end_date': '2012-01-15',
      'metrics': 'ga:visits',
      'dimensions': 'ga:source,ga:keyword',
      'sort': '-ga:visits',
      'filters': 'ga:medium==organic',
      'start_index': '1',
      'max_results': '25',
  }
  return service.data().ga().get(**query_params).execute()
def print_results(results):
"""Prints out the results.
This prints out the profile name, the column headers, and all the rows of
data.
Args:
results: The response returned from the Core Reporting API.
"""
print
print 'Profile Name: %s' % results.get('profileInfo').get('profileName')
print
# Print header.
output = []
for header in results.get('columnHeaders'):
output.append('%30s' % header.get('name'))
print ''.join(output)
# Print data table.
if results.get('rows', []):
for row in results.get('rows'):
output = []
for cell in row:
output.append('%30s' % cell)
print ''.join(output)
else:
print 'No Rows Found'
# Run the sample when executed as a script (not when imported).
if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple command-line example for Translate.
Command-line application that translates some text.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from apiclient.discovery import build
def main():
  """Builds a Translate API service and prints a sample translation."""
  # Build a service object for interacting with the API. Visit
  # the Google APIs Console <http://code.google.com/apis/console>
  # to get an API key for your own application.
  # NOTE(review): an API key is hard-coded in source here; replace it with
  # your own and keep real credentials out of version control.
  service = build('translate', 'v2',
            developerKey='AIzaSyDRRpR3GS1F1_jKNNM9HCNd2wJQyPG3oN0')
  # Translate two English words to French and print the raw response dict.
  print service.translations().list(
      source='en',
      target='fr',
      q=['flower', 'car']
    ).execute()
# Run the sample when executed as a script (not when imported).
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for the Google TaskQueue API command-line tool."""
__version__ = '1.0.2'
import sys
try:
from setuptools import setup
print 'Loaded setuptools'
except ImportError:
from distutils.core import setup
print 'Loaded distutils.core'
# Distribution name as published on PyPI.
PACKAGE_NAME = 'google-taskqueue-client'

# Runtime dependencies; note google-apputils is pinned to 0.1.
INSTALL_REQUIRES = ['google-apputils==0.1',
                    'google-api-python-client',
                    'httplib2',
                    'oauth2',
                    'python-gflags']

# Package metadata and the command-line entry scripts to install.
setup(name=PACKAGE_NAME,
      version=__version__,
      description='Google TaskQueue API command-line tool and utils',
      author='Google Inc.',
      author_email='google-appengine@googlegroups.com',
      url='http://code.google.com/appengine/docs/python/taskqueue/pull/overview.html',
      install_requires=INSTALL_REQUIRES,
      packages=['gtaskqueue'],
      scripts=['gtaskqueue/gtaskqueue', 'gtaskqueue/gtaskqueue_puller',
               'gtaskqueue/gen_appengine_access_token'],
      license='Apache 2.0',
      keywords='google taskqueue api client',
      classifiers=['Development Status :: 3 - Alpha',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: Apache Software License',
                   'Operating System :: POSIX',
                   'Topic :: Internet :: WWW/HTTP'])
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to pull tasks from TaskQueues and execute them.
This module does the following in an infinite loop.
1. Connects to Task API (of TaskQueues API collection) to request lease on
certain number of tasks (specified by user).
2. Spawns parallel processes to execute the leased tasks.
3. Polls all the tasks continuously till they finish.
4. Deletes the tasks from taskqueue on their successful completion.
5. It lets the user specify when to invoke the lease request instead of polling
tasks status in a tight loop for better resource utilization:
   a. Invoke the Lease request when running tasks go beyond a certain
threshold (min_running_tasks)
b. Wait time becomes more than specified poll-time-out interval.
6. Repeat the steps from 1 to 5 when either all tasks have finished executing
or one of the conditions in 5) is met. """
import sys
import time
from apiclient.errors import HttpError
from gtaskqueue.client_task import ClientTask
from gtaskqueue.taskqueue_client import TaskQueueClient
from gtaskqueue.taskqueue_logger import logger
from gtaskqueue.taskqueue_logger import set_logger
from google.apputils import app
import gflags as flags
# Command-line flags controlling the puller's behavior (see module
# docstring for the overall lease/execute/delete loop they tune).
FLAGS = flags.FLAGS
flags.DEFINE_string(
    'project_name',
    'default',
    'The name of the Taskqueue API project.')
flags.DEFINE_string(
    'taskqueue_name',
    'testpuller',
    'taskqueue to which client wants to connect to')
flags.DEFINE_integer(
    'lease_secs',
    30,
    'The lease for the task in seconds')
flags.DEFINE_integer(
    'num_tasks',
    10,
    'The number of tasks to lease')
flags.DEFINE_integer(
    'min_running_tasks',
    0,
    'minmum number of tasks below which lease can be invoked')
flags.DEFINE_float(
    'sleep_interval_secs',
    2,
    'sleep interval when no tasks are found in the taskqueue')
flags.DEFINE_float(
    'timeout_secs_for_next_lease_request',
    600,
    'Wait time before next poll when no tasks are found in the'
    'queue (in seconds)')
flags.DEFINE_integer(
    'taskapi_requests_per_sec',
    None,
    'limit on task_api requests per second')
flags.DEFINE_float(
    'sleep_before_next_poll_secs',
    2,
    'sleep interval before next poll')
class TaskQueuePuller(object):
"""Maintains state information for TaskQueuePuller."""
    def __init__(self):
        """Initializes puller state and connects to the TaskQueue API.

        Exits the process if the API handler cannot be constructed.
        """
        # Timestamp of the most recent lease request (None until first lease).
        self._last_lease_time = None
        # Start of the current poll-timeout window (None when inactive).
        self._poll_timeout_start = None
        # Number of tasks returned by the most recent lease request.
        self._num_last_leased_tasks = 0
        # Dictionary for running tasks's ids and their corresponding
        # client_task object.
        self._taskprocess_map = {}
        try:
            self.__tcq = TaskQueueClient()
            self.task_api = self.__tcq.get_taskapi()
        except HttpError, http_error:
            # NOTE(review): the two concatenated literals render as
            # 'henceexiting' -- a space is missing between them.
            logger.error('Could not get TaskQueue API handler and hence' \
                         'exiting: %s' % str(http_error))
            sys.exit()
def _can_lease(self):
"""Determines if new tasks can be leased.
Determines if new taks can be leased based on
1. Number of tasks already running in the system.
2. Limit on accessing the taskqueue apirary.
Returns:
True/False.
"""
if self._num_tasks_to_lease() > 0 and not self._is_rate_exceeded():
return True
else:
return False
    def _is_rate_exceeded(self):
        """Determines if requests/second to TaskQueue API has exceeded limit.

        We do not access the APIs beyond the specified permissible limit.
        If we have run N tasks in elapsed time since last lease, we have
        already made N+1 requests to API (1 for collective lease and N for
        their individual delete operations). If K reqs/sec is the limit on
        accessing APIs, then we should not invoke any request to API before
        N+1/K sec approximately. The above condition is formulated in the
        following method.

        Returns:
          True/False
        """
        # No limit configured: never throttle.
        if not FLAGS.taskapi_requests_per_sec:
            return False
        # No lease has been made yet, so no requests have been counted.
        if not self._last_lease_time:
            return False
        curr_time = time.time()
        # Requests made since the last lease = tasks leased minus tasks
        # still running (each completed task cost one delete request).
        # Throttle until enough wall time has passed for that many
        # requests at the configured rate.
        if ((curr_time - self._last_lease_time) <
            ((1.0 * (self._num_last_leased_tasks -
                     len(self._taskprocess_map)) /
              FLAGS.taskapi_requests_per_sec))):
            return True
        else:
            return False
def _num_tasks_to_lease(self):
"""Determines how many tasks can be leased.
num_tasks is upper limit to running tasks in the system and hence
number of tasks which could be leased is difference of numtasks and
currently running tasks.
Returns:
Number of tasks to lease.
"""
return FLAGS.num_tasks - len(self._taskprocess_map)
def _update_last_lease_info(self, result):
"""Updates the information regarding last lease.
Args:
result: Response object from TaskQueue API, containing list of
tasks.
"""
self._last_lease_time = time.time()
if result:
if result.get('items'):
self._num_last_leased_tasks = len(result.get('items'))
else:
self._num_last_leased_tasks = 0
else:
self._num_last_leased_tasks = 0
    def _update_poll_timeout_start(self):
        """Updates the start time for poll-timeout.

        Only records a start time when no timeout window is currently
        active, so the window measures how long polling has been waiting.
        """
        if not self._poll_timeout_start:
            self._poll_timeout_start = time.time()
    def _continue_polling(self):
        """Checks whether lease can be invoked based on running tasks and
        timeout.

        Lease can be invoked if
        1. Running tasks in the system has gone below the specified
           threshold (min_running_tasks).
        2. Wait time has exceeded beyond time-out specified and at least one
           task has finished since last lease invocation.

        By doing this, we are essentially trying to batch the lease requests.
        If this is not done and we start off leasing N tasks, its likely tasks
        may finish slightly one after another, and we make N lease requests for
        each task for next N tasks and so on. This can result in unnecessary
        lease API call and hence to avoid that, we try and batch the lease
        requests. Also we put certain limit on wait time for batching the
        requests by incorporating the time-out.

        Returns:
          True (keep polling) / False (stop polling and lease).
        """
        # Few enough tasks are running that a new lease is worthwhile.
        if len(self._taskprocess_map) <= FLAGS.min_running_tasks:
            return False
        if self._poll_timeout_start:
            elapsed_time = time.time() - self._poll_timeout_start
            if elapsed_time > FLAGS.timeout_secs_for_next_lease_request:
                # Reset the window so the next timeout is measured afresh.
                self._poll_timeout_start = None
                return False
        return True
def _get_tasks_from_queue(self):
"""Gets the available tasks from the taskqueue.
Returns:
Lease response object.
"""
try:
tasks_to_fetch = self._num_tasks_to_lease()
lease_req = self.task_api.tasks().lease(
project=FLAGS.project_name,
taskqueue=FLAGS.taskqueue_name,
leaseSecs=FLAGS.lease_secs,
numTasks=tasks_to_fetch,
body={})
result = lease_req.execute()
return result
except HttpError, http_error:
logger.error('Error during lease request: %s' % str(http_error))
return None
    def _create_subprocesses_for_tasks(self, result):
        """Spawns parallel sub processes to execute tasks for better
        throughput.

        Args:
          result: lease response dictionary object from the TaskQueue API.
        """
        if not result:
            logger.info('Error: result is not defined')
            return None
        if result.get('items'):
            for task in result.get('items'):
                task_id = task.get('id')
                # Given that a task may be leased multiple times, we may get a
                # task which we are currently executing on, so make sure we
                # don't spawn another subprocess for it.
                if task_id not in self._taskprocess_map:
                    ct = ClientTask(task)
                    # Check if tasks got initialized properly and then put
                    # them in the running tasks map.
                    if ct.init():
                        # Put the clientTask objects in a dictionary to keep
                        # track of stats and objects are used later to delete
                        # the tasks from taskqueue
                        self._taskprocess_map[ct.get_task_id()] = ct
def _poll_running_tasks(self):
"""Polls all the running tasks and delete them from taskqueue if
completed."""
if self._taskprocess_map:
for task in self._taskprocess_map.values():
if task.is_completed(self.task_api):
del self._taskprocess_map[task.get_task_id()]
# updates scheduling information for later use.
self._update_poll_timeout_start()
def _sleep_before_next_lease(self):
"""Sleeps before invoking lease if required based on last lease info.
It sleeps when no tasks were found on the taskqueue during last lease
request. To note, it discount the time taken in polling the tasks and
sleeps for (sleep_interval - time taken in poll). This avoids the
unnecessary wait if tasks could be leased. If no time was taken in
poll since there were not tasks in the system, it waits for full sleep
interval and thus optimizes the CPU cycles.
It does not sleep if the method is called for the first time (when no
lease request has ever been made).
"""
if not self._last_lease_time:
sleep_secs = 0
elif self._num_last_leased_tasks <= 0:
time_elpased_since_last_lease = time.time() - self._last_lease_time
sleep_secs = (FLAGS.sleep_interval_secs -
time_elpased_since_last_lease)
if sleep_secs > 0:
logger.info('No tasks found and hence sleeping for sometime')
time.sleep(FLAGS.sleep_interval_secs)
def lease_tasks(self):
    """Requests lease for an appropriate number of tasks.

    Waits (sleeps) first when the previous lease returned nothing, then —
    if the scheduling policy allows a lease right now — fetches a batch of
    tasks, records the lease bookkeeping and spawns a subprocess per task.
    This both improves resource utilization and throttles the number of
    requests sent to the taskqueue APIs.

    Returns:
        True/False based on if tasks could be leased or not.
    """
    self._sleep_before_next_lease()
    if not self._can_lease():
        return False
    result = self._get_tasks_from_queue()
    self._update_last_lease_info(result)
    self._create_subprocesses_for_tasks(result)
    return True
def poll_tasks(self):
    """Polls the status of running tasks until there is spare capacity.

    Checks task status once per round and keeps re-polling while the
    system is still full, sleeping between rounds to avoid burning CPU in
    a tight loop. Returns only when the system can accommodate at least
    one new task.
    """
    while True:
        self._poll_running_tasks()
        if not self._continue_polling():
            break
        logger.info('Sleeping before next poll')
        time.sleep(FLAGS.sleep_before_next_poll_secs)
def main(argv):
    """Infinite loop to lease new tasks and poll them for completion.

    Args:
        argv: Command-line arguments (unused; flag parsing is done by
            app.run()).
    """
    # Settings for logger.
    set_logger()
    # Instantiate puller.
    puller = TaskQueuePuller()
    # Lease/poll forever: poll_tasks blocks until there is capacity for at
    # least one more task, which naturally paces the next lease request.
    while True:
        puller.lease_tasks()
        puller.poll_tasks()


if __name__ == '__main__':
    app.run()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands to interact with the Task object of the TaskQueue API."""
__version__ = '0.0.1'
from gtaskqueue.taskqueue_cmd_base import GoogleTaskCommand
from google.apputils import app
from google.apputils import appcommands
import gflags as flags
FLAGS = flags.FLAGS
class GetTaskCommand(GoogleTaskCommand):
    """Fetches and displays the properties of a single existing task."""

    def __init__(self, name, flag_values):
        super(GetTaskCommand, self).__init__(name, flag_values)

    def build_request(self, task_api, flag_values):
        """Builds the API request that retrieves one task.

        Args:
            task_api: The handle to the task collection API.
            flag_values: The parsed command flags.

        Returns:
            The prepared get request for the named task.
        """
        return task_api.get(
            project=flag_values.project_name,
            taskqueue=flag_values.taskqueue_name,
            task=flag_values.task_name)
class LeaseTaskCommand(GoogleTaskCommand):
    """Lease a new task from the queue."""

    def __init__(self, name, flag_values):
        # Command-local flags: lease duration, batch size, and how much of
        # each (base64) payload to echo back to the user.
        flags.DEFINE_integer('lease_secs',
                             None,
                             'The lease for the task in seconds',
                             flag_values=flag_values)
        flags.DEFINE_integer('num_tasks',
                             1,
                             'The number of tasks to lease',
                             flag_values=flag_values)
        flags.DEFINE_integer('payload_size_to_display',
                             2 * 1024 * 1024,
                             'Size of the payload for leased tasks to show',
                             flag_values=flag_values)
        # need_task_flag=False: the queue decides which tasks get leased,
        # so no --task_name is required.
        super(LeaseTaskCommand, self).__init__(name,
                                               flag_values,
                                               need_task_flag=False)

    def build_request(self, task_api, flag_values):
        """Build a request to lease a pending task from the TaskQueue.

        Args:
            task_api: The handle to the task collection API.
            flag_values: The parsed command flags.

        Returns:
            A new leased task.

        Raises:
            app.UsageError: If --lease_secs was not supplied.
        """
        if not flag_values.lease_secs:
            raise app.UsageError('lease_secs must be specified')
        return task_api.lease(project=flag_values.project_name,
                              taskqueue=flag_values.taskqueue_name,
                              leaseSecs=flag_values.lease_secs,
                              numTasks=flag_values.num_tasks)

    def print_result(self, result):
        """Override to optionally strip the payload since it can be long."""
        if result.get('items'):
            items = []
            for task in result.get('items'):
                payloadlen = len(task['payloadBase64'])
                if payloadlen > FLAGS.payload_size_to_display:
                    # Truncate to the display limit and note how many
                    # bytes were omitted.
                    extra = payloadlen - FLAGS.payload_size_to_display
                    task['payloadBase64'] = ('%s(%d more bytes)' %
                        (task['payloadBase64'][:FLAGS.payload_size_to_display],
                        extra))
                items.append(task)
            result['items'] = items
        GoogleTaskCommand.print_result(self, result)
class DeleteTaskCommand(GoogleTaskCommand):
    """Deletes a single existing task from its queue."""

    def __init__(self, name, flag_values):
        super(DeleteTaskCommand, self).__init__(name, flag_values)

    def build_request(self, task_api, flag_values):
        """Builds the API request that deletes one task.

        Args:
            task_api: The handle to the taskqueue collection API.
            flag_values: The parsed command flags.

        Returns:
            The prepared delete request for the named task.
        """
        return task_api.delete(
            project=flag_values.project_name,
            taskqueue=flag_values.taskqueue_name,
            task=flag_values.task_name)
class ListTasksCommand(GoogleTaskCommand):
    """Lists all tasks in a queue (currently upto a max of 100)."""

    def __init__(self, name, flag_values):
        # Listing operates on the whole queue; no --task_name flag needed.
        super(ListTasksCommand, self).__init__(name,
                                               flag_values,
                                               need_task_flag=False)

    def build_request(self, task_api, flag_values):
        """Builds the API request listing the queue's pending tasks.

        Args:
            task_api: The handle to the taskqueue collection API.
            flag_values: The parsed command flags.

        Returns:
            A list of pending tasks in the queue.
        """
        return task_api.list(
            project=flag_values.project_name,
            taskqueue=flag_values.taskqueue_name)
class ClearTaskQueueCommand(GoogleTaskCommand):
    """Deletes all tasks in a queue (default to a max of 100)."""

    def __init__(self, name, flag_values):
        flags.DEFINE_integer('max_delete', 100, 'How many to clear at most',
                             flag_values=flag_values)
        super(ClearTaskQueueCommand, self).__init__(name,
                                                    flag_values,
                                                    need_task_flag=False)

    def run_with_api_and_flags(self, api, flag_values):
        """Run the command, returning the result.

        Args:
            api: The handle to the Google TaskQueue API.
            flag_values: The parsed command flags.

        Returns:
            The result of running the command: a dict with the total number
            of tasks deleted.
        """
        tasks_api = api.tasks()
        self._flag_values = flag_values
        # Remaining deletion budget; decremented by _delete_a_batch.
        self._to_delete = flag_values.max_delete
        total_deleted = 0
        while self._to_delete > 0:
            n_deleted = self._delete_a_batch(tasks_api)
            if n_deleted <= 0:
                # Nothing left to list/delete; stop early.
                break
            total_deleted += n_deleted
        return {'deleted': total_deleted}

    def _delete_a_batch(self, tasks):
        """Delete a batch of tasks.

        Since the list method only gives us back 100 at a time, we may have
        to call it several times to clear the entire queue.

        Args:
            tasks: The handle to the Google TaskQueue API Tasks resource.

        Returns:
            The number of tasks deleted.
        """
        list_request = tasks.list(project=self._flag_values.project_name,
                                  taskqueue=self._flag_values.taskqueue_name)
        result = list_request.execute()
        n_deleted = 0
        if result:
            for task in result.get('items', []):
                # Respect the overall --max_delete budget even when the
                # listing returns more tasks than we may delete.
                if self._to_delete > 0:
                    self._to_delete -= 1
                    n_deleted += 1
                    print 'Deleting: %s' % task['id']
                    tasks.delete(project=self._flag_values.project_name,
                                 taskqueue=self._flag_values.taskqueue_name,
                                 task=task['id']).execute()
        return n_deleted
def add_commands():
    """Registers every task subcommand with the appcommands dispatcher."""
    command_table = (
        ('listtasks', ListTasksCommand),
        ('gettask', GetTaskCommand),
        ('deletetask', DeleteTaskCommand),
        ('leasetask', LeaseTaskCommand),
        ('clear', ClearTaskQueueCommand),
    )
    for command_name, command_class in command_table:
        appcommands.AddCmd(command_name, command_class)
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Command line tool for interacting with Google TaskQueue API."""
__version__ = '0.0.1'
import logging
from gtaskqueue import task_cmds
from gtaskqueue import taskqueue_cmds
from google.apputils import appcommands
import gflags as flags
# Logging levels the tool exposes via --log_level, most to least verbose.
LOG_LEVELS = [logging.DEBUG,
              logging.INFO,
              logging.WARNING,
              logging.CRITICAL]
# Human-readable names ('DEBUG', 'INFO', ...) used as the enum flag values.
# NOTE(review): map() returns an iterator on Python 3; DEFINE_enum needs a
# sequence — wrap in list() if this file is ever ported.
LOG_LEVEL_NAMES = map(logging.getLevelName, LOG_LEVELS)
FLAGS = flags.FLAGS
flags.DEFINE_enum(
    'log_level',
    logging.getLevelName(logging.WARNING),
    LOG_LEVEL_NAMES,
    'Logging output level.')
def main(unused_argv):
    """Applies --log_level to the root logger and registers all commands."""
    # Invert name -> numeric level so the flag's enum value can be mapped
    # back onto the logging module's constants.
    name_to_level = dict(
        (logging.getLevelName(level), level) for level in LOG_LEVELS)
    logging.getLogger().setLevel(name_to_level[FLAGS.log_level])
    taskqueue_cmds.add_commands()
    task_cmds.add_commands()


if __name__ == '__main__':
    appcommands.Run()
| Python |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to encapsulate task related information and methods on task_puller."""
import base64
import oauth2 as oauth
import os
import subprocess
import tempfile
import time
import urllib2
from apiclient.errors import HttpError
from gtaskqueue.taskqueue_logger import logger
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'executable_binary',
'/bin/cat',
'path of the binary to be executed')
flags.DEFINE_string(
'output_url',
'',
'url to which output is posted. The url must include param name, '
'value for which is populated with task_id from puller while posting '
'the data. Format of output url is absolute url which handles the'
'post request from task queue puller.'
'(Eg: "http://taskpuller.appspot.com/taskdata?name=").'
'The Param value is always the task_id. The handler for this post'
'should be able to associate the task with its id and take'
'appropriate action. Use the appengine_access_token.py tool to'
'generate the token and store it in a file before you start.')
flags.DEFINE_string(
'appengine_access_token_file',
None,
'File containing an Appengine Access token, if any. If present this'
'token is added to the output_url request, so that the output_url can'
'be an authenticated end-point. Use the appengine_access_token.py tool'
'to generate the token and store it in a file before you start.')
flags.DEFINE_float(
'task_timeout_secs',
'3600',
'timeout to kill the task')
class ClientTaskInitError(Exception):
    """Raised when initialization of client task fails.

    Carries the id of the task that failed plus a short description of
    what went wrong.
    """

    def __init__(self, task_id, error_str):
        super(ClientTaskInitError, self).__init__()
        self.task_id = task_id
        self.error_str = error_str

    def __str__(self):
        return ('Error initializing task "%s". Error details "%s". '
                % (self.task_id, self.error_str))
class ClientTask(object):
    """Class to encapsulate task information pulled by taskqueue_puller module.

    This class is responsible for creating an independent client task object
    by taking some information from lease response task object. It
    encapsulates methods responsible for spawning an independent subprocess
    for executing the task, tracking the status of the task and also deleting
    the task from taskqueue when completed. It also has the functionality to
    give the output back to the application by posting to the specified url.
    """

    # Class-level cache for the Appengine access token, shared by all tasks.
    # (Fix: get_access_token previously read/assigned an undefined local
    # `_access_token`, raising UnboundLocalError on first use.)
    _access_token = None

    def __init__(self, task):
        self._task = task            # Raw task dict from the lease response.
        self._process = None         # Popen handle once execution starts.
        self._output_file = None     # Lazily-created temp file for output.

    @classmethod
    def get_access_token(cls):
        """Returns the cached Appengine access token, loading it on demand.

        Returns:
            An oauth.Token parsed from --appengine_access_token_file, or
            None when no token file was configured.
        """
        if not FLAGS.appengine_access_token_file:
            return None
        if not cls._access_token:
            fhandle = open(FLAGS.appengine_access_token_file, 'rb')
            cls._access_token = oauth.Token.from_string(fhandle.read())
            fhandle.close()
        return cls._access_token

    def init(self):
        """Extracts information from task object and intializes processing.

        Extracts id and payload from task object, decodes the payload and
        puts it in an input file. After this, it spawns a subprocess to
        execute the task.

        Returns:
            True if everything till task execution starts fine.
            False if anything goes wrong in initialization of task execution.
        """
        try:
            self.task_id = self._task.get('id')
            self._payload = self._decode_base64_payload(
                self._task.get('payloadBase64'))
            self._payload_file = self._dump_payload_to_file()
            self._start_task_execution()
            return True
        except ClientTaskInitError as ctie:
            logger.error(str(ctie))
            return False

    def _decode_base64_payload(self, encoded_str):
        """Method to decode payload encoded in base64."""
        try:
            # If the payload is empty, do not try to decode it. Payload is
            # usually not expected to be empty; log a warning and continue.
            if encoded_str:
                decoded_str = base64.urlsafe_b64decode(
                    encoded_str.encode('utf-8'))
                return decoded_str
            else:
                logger.warn('Empty paylaod for task %s' % self.task_id)
                return ''
        except base64.binascii.Error as berror:
            logger.error('Error decoding payload for task %s. Error details %s'
                         % (self.task_id, str(berror)))
            raise ClientTaskInitError(self.task_id, 'Error decoding payload')
        # Generic catch to avoid crashing the puller on any bad encoding in
        # a task's payload. (Narrowed from a bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate.)
        except Exception:
            raise ClientTaskInitError(self.task_id, 'Error decoding payload')

    def _dump_payload_to_file(self):
        """Method to write input extracted from payload to a temporary file.

        Returns:
            Name of the temp file holding the payload.

        Raises:
            ClientTaskInitError: If the payload could not be written out.
        """
        try:
            (fd, fname) = tempfile.mkstemp()
            f = os.fdopen(fd, 'w')
            f.write(self._payload)
            f.close()
            return fname
        except OSError as os_error:
            # Log the caught instance. (Fix: the old code logged
            # str(OSError) — the class itself — hiding the actual details.)
            logger.error('Error dumping payload %s. Error details %s' %
                         (self.task_id, str(os_error)))
            raise ClientTaskInitError(self.task_id, 'Error dumping payload')

    def _get_input_file(self):
        # Input temp file is created by _dump_payload_to_file during init().
        return self._payload_file

    def _post_output(self):
        """Posts the outback back to specified url in the form of a byte
        array.

        It reads the output generated by the task as a byte-array and posts
        it to the specified url appended with the taskId. The application
        using the taskqueue must have a handler for the data being posted
        from the puller. The body is a byte-array to keep it generic for any
        kind of output generated.

        Returns:
            True/False based on post status.
        """
        if FLAGS.output_url:
            try:
                f = open(self._get_output_file(), 'rb')
                body = f.read()
                f.close()
                url = FLAGS.output_url + self.task_id
                logger.debug('Posting data to url %s' % url)
                headers = {'Content-Type': 'byte-array'}
                # Add an access token to the headers if specified.
                # This enables the output_url to be authenticated and not
                # open.
                access_token = ClientTask.get_access_token()
                if access_token:
                    consumer = oauth.Consumer('anonymous', 'anonymous')
                    oauth_req = oauth.Request.from_consumer_and_token(
                        consumer,
                        token=access_token,
                        http_url=url)
                    headers.update(oauth_req.to_header())
                # TODO: Use httplib instead of urllib for consistency.
                req = urllib2.Request(url, body, headers)
                urllib2.urlopen(req)
            except ValueError as value_error:
                logger.error('Error posting data back %s. Error details %s'
                             % (self.task_id, str(value_error)))
                return False
            except Exception as post_error:
                logger.error('Exception while posting data back %s. Error'
                             'details %s' % (self.task_id, str(post_error)))
                return False
        return True

    def _get_output_file(self):
        """Returns the output file if it exists, else creates it and returns
        it."""
        if not self._output_file:
            (_, self._output_file) = tempfile.mkstemp()
        return self._output_file

    def get_task_id(self):
        # Public accessor used by the puller to key its running-task map.
        return self.task_id

    def _start_task_execution(self):
        """Method to spawn subprocess to execute the tasks.

        Splits the commands/executable_binary into Popen argument form and
        appends input and output files. The executable is assumed to take
        the input and output files as its first and second positional
        parameters respectively.

        Raises:
            ClientTaskInitError: If the subprocess could not be started.
        """
        # TODO: Add code to handle the cleanly shutdown when a process is
        # killed by Ctrl+C.
        try:
            cmdline = FLAGS.executable_binary.split(' ')
            cmdline.append(self._get_input_file())
            cmdline.append(self._get_output_file())
            self._process = subprocess.Popen(cmdline)
            # Remember when we started; used by _has_timedout.
            self.task_start_time = time.time()
        except OSError as os_error:
            logger.error('Error creating subprocess %s. Error details %s'
                         % (self.task_id, str(os_error)))
            self._cleanup()
            raise ClientTaskInitError(self.task_id,
                                      'Error creating subprocess')
        except ValueError:
            # Fix: added the missing %s placeholder so the task id is
            # actually rendered into the log message.
            logger.error('Invalid arguments while executing task %s',
                         self.task_id)
            self._cleanup()
            raise ClientTaskInitError(self.task_id,
                                      'Invalid arguments while executing task')

    def is_completed(self, task_api):
        """Method to check if task has finished executing.

        If the subprocess has exited successfully, the output is posted back
        and (on successful post) the task is deleted from the taskqueue. If
        the task has been running for too long it is assumed defunct and its
        subprocess is killed; the task is intentionally NOT deleted from the
        taskqueue so it can be leased again. In both cases True is returned,
        since there is nothing more to run. Otherwise the task is still
        running and False is returned.

        Args:
            task_api: handle for taskqueue api collection.

        Returns:
            Task completion status (True/False)
        """
        status = False
        try:
            task_status = self._process.poll()
            if task_status == 0:
                status = True
                if self._post_output():
                    self._delete_task_from_queue(task_api)
                self._cleanup()
            elif self._has_timedout():
                status = True
                self._kill_subprocess()
        except OSError as os_error:
            logger.error('Error during polling status of task %s, Error '
                         'details %s' % (self.task_id, str(os_error)))
        return status

    def _cleanup(self):
        """Cleans up temporary input/output files used in task execution."""
        try:
            if os.path.exists(self._get_input_file()):
                os.remove(self._get_input_file())
            if os.path.exists(self._get_output_file()):
                os.remove(self._get_output_file())
        except OSError as os_error:
            logger.error('Error during file cleanup for task %s. Error'
                         'details %s' % (self.task_id, str(os_error)))

    def _delete_task_from_queue(self, task_api):
        """Method to delete the task from the taskqueue.

        Called only after the output was successfully posted back, i.e. the
        task has produced its expected output. Failures are logged but not
        raised, so the puller keeps running.

        Args:
            task_api: handle for taskqueue api collection.
        """
        try:
            delete_request = task_api.tasks().delete(
                project=FLAGS.project_name,
                taskqueue=FLAGS.taskqueue_name,
                task=self.task_id)
            delete_request.execute()
        except HttpError as http_error:
            logger.error('Error deleting task %s from taskqueue.'
                         'Error details %s'
                         % (self.task_id, str(http_error)))

    def _has_timedout(self):
        """Checks if task has been running since long and has timedout."""
        return (time.time() - self.task_start_time) > FLAGS.task_timeout_secs

    def _kill_subprocess(self):
        """Kills the process after cleaning up the task."""
        self._cleanup()
        try:
            self._process.kill()
            logger.info('Trying to kill task %s, since it has been running '
                        'for long' % self.task_id)
        except OSError as os_error:
            logger.error('Error killing task %s. Error details %s'
                         % (self.task_id, str(os_error)))
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands to interact with the TaskQueue object of the TaskQueue API."""
__version__ = '0.0.1'
from gtaskqueue.taskqueue_cmd_base import GoogleTaskQueueCommand
from google.apputils import appcommands
import gflags as flags
FLAGS = flags.FLAGS
class GetTaskQueueCommand(GoogleTaskQueueCommand):
    """Fetches and displays the properties of an existing task queue."""

    def __init__(self, name, flag_values):
        # Queue statistics are returned only when --get_stats is set.
        flags.DEFINE_boolean('get_stats',
                             False,
                             'Whether to get Stats',
                             flag_values=flag_values)
        super(GetTaskQueueCommand, self).__init__(name, flag_values)

    def build_request(self, taskqueue_api, flag_values):
        """Builds the API request fetching one queue's properties.

        Args:
            taskqueue_api: The handle to the taskqueue collection API.
            flag_values: The parsed command flags.

        Returns:
            The prepared get request for the queue.
        """
        return taskqueue_api.get(
            project=flag_values.project_name,
            taskqueue=flag_values.taskqueue_name,
            getStats=flag_values.get_stats)
def add_commands():
    """Registers the taskqueue subcommands with the appcommands dispatcher."""
    appcommands.AddCmd('getqueue', GetTaskQueueCommand)
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to connect to TaskQueue API."""
import os
import sys
import urlparse
from apiclient.discovery import build
from apiclient.errors import HttpError
import httplib2
from oauth2client.anyjson import simplejson as json
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from gtaskqueue.taskqueue_logger import logger
from google.apputils import app
import gflags as flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
'service_version',
'v1beta1',
'Google taskqueue api version.')
flags.DEFINE_string(
'api_host',
'https://www.googleapis.com/',
'API host name')
flags.DEFINE_bool(
'use_developer_key',
False,
'User wants to use the developer key while accessing taskqueue apis')
flags.DEFINE_string(
'developer_key_file',
'~/.taskqueue.apikey',
'Developer key provisioned from api console')
flags.DEFINE_bool(
'dump_request',
False,
'Prints the outgoing HTTP request along with headers and body.')
flags.DEFINE_string(
'credentials_file',
'taskqueue.dat',
'File where you want to store the auth credentails for later user')
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the Identity tab on
# the Google APIs Console <http://code.google.com/apis/console>
FLOW = OAuth2WebServerFlow(
client_id='157776985798.apps.googleusercontent.com',
client_secret='tlpVCmaS6yLjxnnPu0ARIhNw',
scope='https://www.googleapis.com/auth/taskqueue',
user_agent='taskqueue-cmdline-sample/1.0')
class TaskQueueClient:
    """Class to setup connection with taskqueue API.

    Builds an OAuth 2.0-authorized apiclient handle for the TaskQueue API
    and wraps the HTTP layer so outgoing requests can optionally be dumped
    (--dump_request) or signed with a developer key (--use_developer_key).
    """

    def __init__(self):
        # A project name is mandatory: every API call is scoped to it.
        if not FLAGS.project_name:
            raise app.UsageError('You must specify a project name'
                                 ' using the "--project_name" flag.')
        discovery_uri = (
            FLAGS.api_host + 'discovery/v1/apis/{api}/{apiVersion}/rest')
        logger.info(discovery_uri)
        try:
            # If the Credentials don't exist or are invalid, run through the
            # native client flow. The Storage object will ensure that if
            # successful the good Credentials will get written back to a
            # file. Setting FLAGS.auth_local_webserver to false since we can
            # run our tool on Virtual Machines and we do not want to run the
            # webserver on VMs.
            FLAGS.auth_local_webserver = False
            storage = Storage(FLAGS.credentials_file)
            credentials = storage.get()
            if credentials is None or credentials.invalid == True:
                credentials = run(FLOW, storage)
            http = credentials.authorize(self._dump_request_wrapper(
                httplib2.Http()))
            self.task_api = build('taskqueue',
                                  FLAGS.service_version,
                                  http=http,
                                  discoveryServiceUrl=discovery_uri)
        except HttpError, http_error:
            # NOTE(review): on HttpError self.task_api is never assigned, so
            # a later get_taskapi() call raises AttributeError — confirm
            # callers tolerate this.
            logger.error('Error gettin task_api: %s' % http_error)

    def get_taskapi(self):
        """Returns handler for tasks API from taskqueue API collection."""
        return self.task_api

    def _dump_request_wrapper(self, http):
        """Dumps the outgoing HTTP request if requested.

        Args:
            http: An instance of httplib2.Http or something that acts like
                it.

        Returns:
            httplib2.Http like object.
        """
        request_orig = http.request

        def new_request(uri, method='GET', body=None, headers=None,
                        redirections=httplib2.DEFAULT_MAX_REDIRECTS,
                        connection_type=None):
            """Overrides the http.request method to add some utilities."""
            # Append the developer key to every non-discovery request when
            # --use_developer_key is set.
            if (FLAGS.api_host + "discovery/" not in uri and
                FLAGS.use_developer_key):
                developer_key_path = os.path.expanduser(
                    FLAGS.developer_key_file)
                if not os.path.isfile(developer_key_path):
                    print 'Please generate developer key from the Google API' \
                        'Console and store it in %s' % (FLAGS.developer_key_file)
                    sys.exit()
                developer_key_file = open(developer_key_path, 'r')
                try:
                    developer_key = developer_key_file.read().strip()
                except IOError, io_error:
                    print 'Error loading developer key from file %s' % (
                        FLAGS.developer_key_file)
                    print 'Error details: %s' % str(io_error)
                    sys.exit()
                finally:
                    developer_key_file.close()
                # Rebuild the URI with key=<developer_key> appended to the
                # query string, preserving any existing query parameters.
                s = urlparse.urlparse(uri)
                query = 'key=' + developer_key
                if s.query:
                    query = s.query + '&key=' + developer_key
                d = urlparse.ParseResult(s.scheme,
                                         s.netloc,
                                         s.path,
                                         s.params,
                                         query,
                                         s.fragment)
                uri = urlparse.urlunparse(d)
            if FLAGS.dump_request:
                # Pretty-print method, URI, headers and (JSON) body of the
                # outgoing request for debugging.
                print '--request-start--'
                print '%s %s' % (method, uri)
                if headers:
                    for (h, v) in headers.iteritems():
                        print '%s: %s' % (h, v)
                print ''
                if body:
                    print json.dumps(json.loads(body),
                                     sort_keys=True,
                                     indent=2)
                print '--request-end--'
            return request_orig(uri,
                                method,
                                body,
                                headers,
                                redirections,
                                connection_type)
        http.request = new_request
        return http

    def print_result(self, result):
        """Pretty-print the result of the command.

        The default behavior is to dump a formatted JSON encoding
        of the result.

        Args:
            result: The JSON-serializable result to print.
        """
        # We could have used the pprint module, but it produces
        # noisy output due to all of our keys and values being
        # unicode strings rather than simply ascii.
        print json.dumps(result, sort_keys=True, indent=2)
| Python |
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool to get an Access Token to access an auth protected Appengine end point.
This tool talks to the appengine end point, and gets an Access Token that is
stored in a file. This token can be used by a tool to do authorized access to
an appengine end point.
"""
from google.apputils import app
import gflags as flags
import httplib2
import oauth2 as oauth
import time
FLAGS = flags.FLAGS
flags.DEFINE_string(
'appengine_host',
None,
'Appengine Host for whom we are trying to get an access token')
flags.DEFINE_string(
'access_token_file',
None,
'The file where the access token is stored')
def get_access_token():
    """Runs the three-legged OAuth 1.0a dance against the Appengine host.

    Obtains a request token, has the user authorize it in a browser,
    exchanges it for an access token, optionally saves the access token to
    --access_token_file, and finally exercises the token against a
    protected resource on the server as a sanity check.
    """
    if not FLAGS.appengine_host:
        print('must supply the appengine host')
        exit(1)
    # setup: the three standard Appengine OAuth endpoints.
    server = FLAGS.appengine_host
    request_token_url = server + '/_ah/OAuthGetRequestToken'
    authorization_url = server + '/_ah/OAuthAuthorizeToken'
    access_token_url = server + '/_ah/OAuthGetAccessToken'
    consumer = oauth.Consumer('anonymous', 'anonymous')
    signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
    # The Http client that will be used to make the requests.
    h = httplib2.Http()
    # get request token
    print '* Obtain a request token ...'
    parameters = {}
    # We dont have a callback server, we're going to use the browser to
    # authorize. 'oob' = out-of-band: the user pastes the code manually.
    #TODO: Add check for 401 etc
    parameters['oauth_callback'] = 'oob'
    oauth_req1 = oauth.Request.from_consumer_and_token(
        consumer, http_url=request_token_url, parameters=parameters)
    oauth_req1.sign_request(signature_method_hmac_sha1, consumer, None)
    print 'Request headers: %s' % str(oauth_req1.to_header())
    response, content = h.request(oauth_req1.to_url(), 'GET')
    token = oauth.Token.from_string(content)
    print 'GOT key: %s secret:%s' % (str(token.key), str(token.secret))
    print '* Authorize the request token ...'
    oauth_req2 = oauth.Request.from_token_and_callback(
        token=token, callback='oob', http_url=authorization_url)
    print 'Please run this URL in a browser and paste the token back here'
    print oauth_req2.to_url()
    verification_code = raw_input('Enter verification code: ').strip()
    token.set_verifier(verification_code)
    # get access token: exchange the now-verified request token.
    print '* Obtain an access token ...'
    oauth_req3 = oauth.Request.from_consumer_and_token(
        consumer, token=token, http_url=access_token_url)
    oauth_req3.sign_request(signature_method_hmac_sha1, consumer, token)
    print 'Request headers: %s' % str(oauth_req3.to_header())
    response, content = h.request(oauth_req3.to_url(), 'GET')
    access_token = oauth.Token.from_string(content)
    print 'Access Token key: %s secret:%s' % (str(access_token.key),
                                              str(access_token.secret))
    # Save the token to a file if its specified.
    if FLAGS.access_token_file:
        fhandle = open(FLAGS.access_token_file, 'w')
        fhandle.write(access_token.to_string())
        fhandle.close()
    # Example : access some protected resources
    print '* Checking the access token against protected resources...'
    # Assumes that the server + "/" is protected.
    # NOTE(review): this check signs with the request token (`token`), not
    # the freshly obtained `access_token` — confirm that is intended.
    test_url = server + "/"
    oauth_req4 = oauth.Request.from_consumer_and_token(consumer,
                                                       token=token,
                                                       http_url=test_url)
    oauth_req4.sign_request(signature_method_hmac_sha1, consumer, token)
    resp, content = h.request(test_url, "GET", headers=oauth_req4.to_header())
    print resp
    print content
def main(argv):
  """Entry point invoked by app.run(); runs the OAuth dance defined above."""
  get_access_token()
if __name__ == '__main__':
  app.run()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands for interacting with Google TaskQueue."""
__version__ = '0.0.1'
import os
import sys
import urlparse
from apiclient.discovery import build
from apiclient.errors import HttpError
import httplib2
from oauth2client.anyjson import simplejson as json
from oauth2client.file import Storage
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.tools import run
from google.apputils import app
from google.apputils import appcommands
import gflags as flags
FLAGS = flags.FLAGS
# Command-line flags controlling which API endpoint is used and how the
# requests are authenticated and logged.
flags.DEFINE_string(
    'service_version',
    'v1beta1',
    'Google taskqueue api version.')
flags.DEFINE_string(
    'api_host',
    'https://www.googleapis.com/',
    'API host name')
flags.DEFINE_string(
    'project_name',
    'default',
    'The name of the Taskqueue API project.')
flags.DEFINE_bool(
    'use_developer_key',
    False,
    'User wants to use the developer key while accessing taskqueue apis')
flags.DEFINE_string(
    'developer_key_file',
    '~/.taskqueue.apikey',
    'Developer key provisioned from api console')
flags.DEFINE_bool(
    'dump_request',
    False,
    'Prints the outgoing HTTP request along with headers and body.')
flags.DEFINE_string(
    'credentials_file',
    'taskqueue.dat',
    # Typo fix: help text previously read "credentails for later user".
    'File where you want to store the auth credentials for later use')
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications <http://code.google.com/apis/accounts/docs/OAuth2.html#IA>
# The client_id client_secret are copied from the Identity tab on
# the Google APIs Console <http://code.google.com/apis/console>
FLOW = OAuth2WebServerFlow(
    client_id='157776985798.apps.googleusercontent.com',
    client_secret='tlpVCmaS6yLjxnnPu0ARIhNw',
    scope='https://www.googleapis.com/auth/taskqueue',
    user_agent='taskqueue-cmdline-sample/1.0')
class GoogleTaskQueueCommandBase(appcommands.Cmd):
"""Base class for all the Google TaskQueue client commands."""
DEFAULT_PROJECT_PATH = 'projects/default'
def __init__(self, name, flag_values):
super(GoogleTaskQueueCommandBase, self).__init__(name, flag_values)
def _dump_request_wrapper(self, http):
"""Dumps the outgoing HTTP request if requested.
Args:
http: An instance of httplib2.Http or something that acts like it.
Returns:
httplib2.Http like object.
"""
request_orig = http.request
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
"""Overrides the http.request method to add some utilities."""
if (FLAGS.api_host + "discovery/" not in uri and
FLAGS.use_developer_key):
developer_key_path = os.path.expanduser(
FLAGS.developer_key_file)
if not os.path.isfile(developer_key_path):
print 'Please generate developer key from the Google APIs' \
'Console and store it in %s' % (FLAGS.developer_key_file)
sys.exit()
developer_key_file = open(developer_key_path, 'r')
try:
developer_key = developer_key_file.read().strip()
except IOError, io_error:
print 'Error loading developer key from file %s' % (
FLAGS.developer_key_file)
print 'Error details: %s' % str(io_error)
sys.exit()
finally:
developer_key_file.close()
s = urlparse.urlparse(uri)
query = 'key=' + developer_key
if s.query:
query = s.query + '&key=' + developer_key
d = urlparse.ParseResult(s.scheme,
s.netloc,
s.path,
s.params,
query,
s.fragment)
uri = urlparse.urlunparse(d)
if FLAGS.dump_request:
print '--request-start--'
print '%s %s' % (method, uri)
if headers:
for (h, v) in headers.iteritems():
print '%s: %s' % (h, v)
print ''
if body:
print json.dumps(json.loads(body), sort_keys=True, indent=2)
print '--request-end--'
return request_orig(uri,
method,
body,
headers,
redirections,
connection_type)
http.request = new_request
return http
def Run(self, argv):
"""Run the command, printing the result.
Args:
argv: The non-flag arguments to the command.
"""
if not FLAGS.project_name:
raise app.UsageError('You must specify a project name'
' using the "--project_name" flag.')
discovery_uri = (
FLAGS.api_host + 'discovery/v1/apis/{api}/{apiVersion}/rest')
try:
# If the Credentials don't exist or are invalid run through the
# native client flow. The Storage object will ensure that if
# successful the good Credentials will get written back to a file.
# Setting FLAGS.auth_local_webserver to false since we can run our
# tool on Virtual Machines and we do not want to run the webserver
# on VMs.
FLAGS.auth_local_webserver = False
storage = Storage(FLAGS.credentials_file)
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run(FLOW, storage)
http = credentials.authorize(self._dump_request_wrapper(
httplib2.Http()))
api = build('taskqueue',
FLAGS.service_version,
http=http,
discoveryServiceUrl=discovery_uri)
result = self.run_with_api_and_flags_and_args(api, FLAGS, argv)
self.print_result(result)
except HttpError, http_error:
print 'Error Processing request: %s' % str(http_error)
def run_with_api_and_flags_and_args(self, api, flag_values, unused_argv):
"""Run the command given the API, flags, and args.
The default implementation of this method discards the args and
calls into run_with_api_and_flags.
Args:
api: The handle to the Google TaskQueue API.
flag_values: The parsed command flags.
unused_argv: The non-flag arguments to the command.
Returns:
The result of running the command
"""
return self.run_with_api_and_flags(api, flag_values)
def print_result(self, result):
"""Pretty-print the result of the command.
The default behavior is to dump a formatted JSON encoding
of the result.
Args:
result: The JSON-serializable result to print.
"""
# We could have used the pprint module, but it produces
# noisy output due to all of our keys and values being
# unicode strings rather than simply ascii.
print json.dumps(result, sort_keys=True, indent=2)
class GoogleTaskQueueCommand(GoogleTaskQueueCommandBase):
  """Base command for working with the taskqueues collection."""

  def __init__(self, name, flag_values):
    super(GoogleTaskQueueCommand, self).__init__(name, flag_values)
    # Every taskqueue command operates on a named queue.
    flags.DEFINE_string('taskqueue_name',
                        'myqueue',
                        'TaskQueue name',
                        flag_values=flag_values)

  def run_with_api_and_flags(self, api, flag_values):
    """Execute the command against the taskqueues collection.

    Args:
      api: The handle to the Google TaskQueue API.
      flag_values: The parsed command flags.

    Returns:
      The result of running the command.
    """
    collection = api.taskqueues()
    request = self.build_request(collection, flag_values)
    return request.execute()
class GoogleTaskCommand(GoogleTaskQueueCommandBase):
  """Base command for working with the tasks collection."""

  def __init__(self, name, flag_values, need_task_flag=True):
    super(GoogleTaskCommand, self).__init__(name, flag_values)
    # Common flags that are shared by all the Task commands.
    flags.DEFINE_string('taskqueue_name',
                        'myqueue',
                        'TaskQueue name',
                        flag_values=flag_values)
    # Not all task commands need the task_name flag.
    if need_task_flag:
      flags.DEFINE_string('task_name',
                          None,
                          'Task name',
                          flag_values=flag_values)

  def run_with_api_and_flags(self, api, flag_values):
    """Run the command, returning the result.

    Args:
      api: The handle to the Google TaskQueue API.
      flag_values: The parsed command flags.

    Returns:
      The result of running the command.
    """
    # Bug fix: a stray flags.DEFINE_string('payload', ...) call had been
    # pasted into the middle of this docstring; it has been removed.
    task_request = self.build_request(api.tasks(), flag_values)
    return task_request.execute()
| Python |
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Log settings for taskqueue_puller module."""
import logging
import logging.config
import logging.handlers

import gflags as flags
from google.apputils import app
FLAGS = flags.FLAGS
flags.DEFINE_string(
'log_output_file',
'/tmp/taskqueue-puller.log',
'Logfile name for taskqueue_puller.')
logger = logging.getLogger('TaskQueueClient')
def set_logger():
  """Configure the 'TaskQueueClient' logger with a rotating file handler.

  Writes INFO-and-above records to FLAGS.log_output_file, rotating at
  1 MiB and keeping up to 5 backup files.
  """
  logger.setLevel(logging.INFO)
  # Timestamp / logger name / level / message.
  formatter = logging.Formatter(
      '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
  # Fix: explicitly import logging.handlers at the top of the file; the
  # original only worked because logging.config happens to import
  # logging.handlers as a side effect.
  handler = logging.handlers.RotatingFileHandler(FLAGS.log_output_file,
                                                 maxBytes=1024 * 1024,
                                                 backupCount=5)
  # Attach formatter to the handler, then the handler to the logger.
  handler.setFormatter(formatter)
  logger.addHandler(handler)
if __name__ == '__main__':
  # NOTE(review): app.run() dispatches to a module-level main(), which this
  # settings module does not define -- confirm this guard is intentional.
  app.run()
| Python |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to do a sparse update of the account attributes.
Tags: accounts.patch
"""
__author__ = 'david.t@google.com (David Torres)'
import pprint
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('account_id', None,
'The ID of the account to which submit the creative',
short_name='a')
gflags.MarkFlagAsRequired('account_id')
gflags.DEFINE_string('cookie_matching_url', None,
'New cookie matching URL to set for the account ',
short_name='u')
gflags.MarkFlagAsRequired('cookie_matching_url')
def main(argv):
  """Sparse-update (patch) an account's cookie matching URL and print it."""
  sample_utils.process_flags(argv)
  printer = pprint.PrettyPrinter()
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  # Only the fields present in the body are modified by a patch.
  body = {
      'accountId': gflags.FLAGS.account_id,
      'cookieMatchingUrl': gflags.FLAGS.cookie_matching_url
  }
  try:
    request = service.accounts().patch(id=gflags.FLAGS.account_id, body=body)
    printer.pprint(request.execute())
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auxiliary file for Ad Exchange Buyer API code samples.
Handles various tasks to do with logging, authentication and initialization.
"""
__author__ = 'david.t@google.com (David Torres)'
import logging
import os
import sys
from apiclient.discovery import build
import gflags
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(
CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/adexchange.buyer',
message=MISSING_CLIENT_SECRETS_MESSAGE
)
# The gflags module makes defining command-line options easy for applications.
# Run this program with the '--help' argument to see all the flags that it
# understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def process_flags(argv):
"""Uses the command-line flags to set the logging level."""
# Let the gflags module process the command-line arguments.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag.
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def initialize_service():
  """Build an authorized Ad Exchange Buyer service object.

  Runs the OAuth 2.0 native-client flow when no valid stored credentials
  are found; refreshed credentials are written back to disk by Storage.

  Returns:
    The authorized and initialized service.
  """
  # Credentials persist in this file between runs.
  storage = Storage('adexchangebuyer.dat')
  credentials = storage.get()
  if credentials is None or credentials.invalid:
    # No usable credentials: walk the user through the auth flow.
    credentials = run(FLOW, storage)
  # Authorize an HTTP client and build the discovery-based service on it.
  http = credentials.authorize(httplib2.Http())
  return build('adexchangebuyer', 'v1', http=http)
| Python |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to retrieve the information of a creative.
Tags: creatives.get
"""
__author__ = 'david.t@google.com (David Torres)'
import pprint
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('account_id', None,
'The ID of the account that contains the creative',
short_name='a')
gflags.MarkFlagAsRequired('account_id')
gflags.DEFINE_string('adgroup_id', None,
'The pretargeting adgroup id to which the creative is '
'associated with',
short_name='g')
gflags.MarkFlagAsRequired('adgroup_id')
gflags.DEFINE_string('buyer_creative_id', None,
'A buyer-specific id that identifies this creative',
short_name='c')
gflags.MarkFlagAsRequired('buyer_creative_id')
def main(argv):
  """Fetch a single creative and pretty-print the API response."""
  sample_utils.process_flags(argv)
  printer = pprint.PrettyPrinter()
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  try:
    # Look the creative up by account, adgroup and buyer creative id.
    request = service.creatives().get(
        accountId=gflags.FLAGS.account_id,
        adgroupId=gflags.FLAGS.adgroup_id,
        buyerCreativeId=gflags.FLAGS.buyer_creative_id)
    printer.pprint(request.execute())
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all accounts for the logged in user.
Tags: accounts.list
"""
__author__ = 'david.t@google.com (David Torres)'
import pprint
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
def main(argv):
  """List all accounts visible to the authenticated user."""
  sample_utils.process_flags(argv)
  printer = pprint.PrettyPrinter()
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  try:
    # Retrieve the account list and display the data as received.
    printer.pprint(service.accounts().list().execute())
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets the active direct deals associated to the logged in user.
Tags: directDeals.list
"""
__author__ = 'david.t@google.com (David Torres)'
import pprint
import sys
from oauth2client.client import AccessTokenRefreshError
import sample_utils
def main(argv):
sample_utils.process_flags(argv)
pretty_printer = pprint.PrettyPrinter()
# Authenticate and construct service.
service = sample_utils.initialize_service()
try:
# Retrieve direct deals and display them as received if any.
result = service.directDeals().list().execute()
if 'direct_deals' in result:
deals = result['direct_deals']
for deal in deals:
pretty_printer.pprint(deal)
else:
print 'No direct deals found'
except AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
| Python |
#!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates how to submit a new creative for its verification.
Tags: creatives.insert
"""
__author__ = 'david.t@google.com (David Torres)'
import pprint
import sys
import gflags
from oauth2client.client import AccessTokenRefreshError
import sample_utils
# Declare command-line flags, and set them as required.
gflags.DEFINE_string('account_id', None,
'The ID of the account to which submit the creative',
short_name='a')
gflags.MarkFlagAsRequired('account_id')
gflags.DEFINE_string('adgroup_id', None,
'The pretargeting adgroup id that this creative will be '
'associated with',
short_name='g')
gflags.MarkFlagAsRequired('adgroup_id')
gflags.DEFINE_string('buyer_creative_id', None,
'A buyer-specific id identifying the creative in this ad',
short_name='c')
gflags.MarkFlagAsRequired('buyer_creative_id')
def main(argv):
  """Submit a new creative for verification and print the response."""
  sample_utils.process_flags(argv)
  printer = pprint.PrettyPrinter()
  # Authenticate and construct service.
  service = sample_utils.initialize_service()
  # Creative to submit; the snippet and dimensions are sample data.
  body = {
      'accountId': gflags.FLAGS.account_id,
      'adgroupId': gflags.FLAGS.adgroup_id,
      'buyerCreativeId': gflags.FLAGS.buyer_creative_id,
      'HTMLSnippet': ('<html><body><a href="http://www.google.com">'
                      'Hi there!</a></body></html>'),
      'clickThroughUrl': ['http://www.google.com'],
      'width': 300,
      'height': 250,
      'advertiserName': 'google'
  }
  try:
    # If the creative has already been reviewed, its status and
    # categories are included in the response.
    printer.pprint(service.creatives().insert(body=body).execute())
  except AccessTokenRefreshError:
    print ('The credentials have been revoked or expired, please re-run the '
           'application to re-authorize')


if __name__ == '__main__':
  main(sys.argv)
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading diffs from a version control system to the codereview app.
Usage summary: upload.py [options] [-- diff_options] [path...]
Diff options are passed to the diff command of the underlying system.
Supported version control systems:
Git
Mercurial
Subversion
Perforce
CVS
It is important for Git/Mercurial users to specify a tree/node/branch to diff
against by using the '--rev' option.
"""
# This code is derived from appcfg.py in the App Engine SDK (open source),
# and from ASPN recipe #146306.
import ConfigParser
import cookielib
import fnmatch
import getpass
import logging
import marshal
import mimetypes
import optparse
import os
import re
import socket
import subprocess
import sys
import urllib
import urllib2
import urlparse
# The md5 module was deprecated in Python 2.5.
try:
from hashlib import md5
except ImportError:
from md5 import md5
try:
import readline
except ImportError:
pass
try:
import keyring
except ImportError:
keyring = None
# The logging verbosity:
# 0: Errors only.
# 1: Status messages.
# 2: Info logs.
# 3: Debug logs.
verbosity = 1
# The account type used for authentication.
# This line could be changed by the review server (see handler for
# upload.py).
AUTH_ACCOUNT_TYPE = "GOOGLE"
# URL of the default review server. As for AUTH_ACCOUNT_TYPE, this line could be
# changed by the review server (see handler for upload.py).
DEFAULT_REVIEW_SERVER = "codereview.appspot.com"
# Max size of patch or base file.
MAX_UPLOAD_SIZE = 900 * 1024
# Constants for version control names. Used by GuessVCSName.
VCS_GIT = "Git"
VCS_MERCURIAL = "Mercurial"
VCS_SUBVERSION = "Subversion"
VCS_PERFORCE = "Perforce"
VCS_CVS = "CVS"
VCS_UNKNOWN = "Unknown"
# whitelist for non-binary filetypes which do not start with "text/"
# .mm (Objective-C) shows up as application/x-freemind on my Linux box.
TEXT_MIMETYPES = ['application/javascript', 'application/json',
'application/x-javascript', 'application/xml',
'application/x-freemind', 'application/x-sh']
VCS_ABBREVIATIONS = {
VCS_MERCURIAL.lower(): VCS_MERCURIAL,
"hg": VCS_MERCURIAL,
VCS_SUBVERSION.lower(): VCS_SUBVERSION,
"svn": VCS_SUBVERSION,
VCS_PERFORCE.lower(): VCS_PERFORCE,
"p4": VCS_PERFORCE,
VCS_GIT.lower(): VCS_GIT,
VCS_CVS.lower(): VCS_CVS,
}
# The result of parsing Subversion's [auto-props] setting.
svn_auto_props_map = None
def GetEmail(prompt):
"""Prompts the user for their email address and returns it.
The last used email address is saved to a file and offered up as a suggestion
to the user. If the user presses enter without typing in anything the last
used email address is used. If the user enters a new address, it is saved
for next time we prompt.
"""
last_email_file_name = os.path.expanduser("~/.last_codereview_email_address")
last_email = ""
if os.path.exists(last_email_file_name):
try:
last_email_file = open(last_email_file_name, "r")
last_email = last_email_file.readline().strip("\n")
last_email_file.close()
prompt += " [%s]" % last_email
except IOError, e:
pass
email = raw_input(prompt + ": ").strip()
if email:
try:
last_email_file = open(last_email_file_name, "w")
last_email_file.write(email)
last_email_file.close()
except IOError, e:
pass
else:
email = last_email
return email
def StatusUpdate(msg):
"""Print a status message to stdout.
If 'verbosity' is greater than 0, print the message.
Args:
msg: The string to print.
"""
if verbosity > 0:
print msg
def ErrorExit(msg):
  """Print an error message to stderr and exit with status 1."""
  sys.stderr.write("%s\n" % msg)
  sys.exit(1)
class ClientLoginError(urllib2.HTTPError):
"""Raised to indicate there was an error authenticating with ClientLogin."""
def __init__(self, url, code, msg, headers, args):
urllib2.HTTPError.__init__(self, url, code, msg, headers, None)
self.args = args
self.reason = args["Error"]
class AbstractRpcServer(object):
"""Provides a common interface for a simple RPC server."""
def __init__(self, host, auth_function, host_override=None, extra_headers={},
save_cookies=False, account_type=AUTH_ACCOUNT_TYPE):
"""Creates a new HttpRpcServer.
Args:
host: The host to send requests to.
auth_function: A function that takes no arguments and returns an
(email, password) tuple when called. Will be called if authentication
is required.
host_override: The host header to send to the server (defaults to host).
extra_headers: A dict of extra headers to append to every request.
save_cookies: If True, save the authentication cookies to local disk.
If False, use an in-memory cookiejar instead. Subclasses must
implement this functionality. Defaults to False.
account_type: Account type used for authentication. Defaults to
AUTH_ACCOUNT_TYPE.
"""
self.host = host
if (not self.host.startswith("http://") and
not self.host.startswith("https://")):
self.host = "http://" + self.host
self.host_override = host_override
self.auth_function = auth_function
self.authenticated = False
self.extra_headers = extra_headers
self.save_cookies = save_cookies
self.account_type = account_type
self.opener = self._GetOpener()
if self.host_override:
logging.info("Server: %s; Host: %s", self.host, self.host_override)
else:
logging.info("Server: %s", self.host)
def _GetOpener(self):
"""Returns an OpenerDirector for making HTTP requests.
Returns:
A urllib2.OpenerDirector object.
"""
raise NotImplementedError()
def _CreateRequest(self, url, data=None):
"""Creates a new urllib request."""
logging.debug("Creating request for: '%s' with payload:\n%s", url, data)
req = urllib2.Request(url, data=data)
if self.host_override:
req.add_header("Host", self.host_override)
for key, value in self.extra_headers.iteritems():
req.add_header(key, value)
return req
def _GetAuthToken(self, email, password):
"""Uses ClientLogin to authenticate the user, returning an auth token.
Args:
email: The user's email address
password: The user's password
Raises:
ClientLoginError: If there was an error authenticating with ClientLogin.
HTTPError: If there was some other form of HTTP error.
Returns:
The authentication token returned by ClientLogin.
"""
account_type = self.account_type
if self.host.endswith(".google.com"):
# Needed for use inside Google.
account_type = "HOSTED"
req = self._CreateRequest(
url="https://www.google.com/accounts/ClientLogin",
data=urllib.urlencode({
"Email": email,
"Passwd": password,
"service": "ah",
"source": "rietveld-codereview-upload",
"accountType": account_type,
}),
)
try:
response = self.opener.open(req)
response_body = response.read()
response_dict = dict(x.split("=")
for x in response_body.split("\n") if x)
return response_dict["Auth"]
except urllib2.HTTPError, e:
if e.code == 403:
body = e.read()
response_dict = dict(x.split("=", 1) for x in body.split("\n") if x)
raise ClientLoginError(req.get_full_url(), e.code, e.msg,
e.headers, response_dict)
else:
raise
def _GetAuthCookie(self, auth_token):
"""Fetches authentication cookies for an authentication token.
Args:
auth_token: The authentication token returned by ClientLogin.
Raises:
HTTPError: If there was an error fetching the authentication cookies.
"""
# This is a dummy value to allow us to identify when we're successful.
continue_location = "http://localhost/"
args = {"continue": continue_location, "auth": auth_token}
req = self._CreateRequest("%s/_ah/login?%s" %
(self.host, urllib.urlencode(args)))
try:
response = self.opener.open(req)
except urllib2.HTTPError, e:
response = e
if (response.code != 302 or
response.info()["location"] != continue_location):
raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg,
response.headers, response.fp)
self.authenticated = True
def _Authenticate(self):
"""Authenticates the user.
The authentication process works as follows:
1) We get a username and password from the user
2) We use ClientLogin to obtain an AUTH token for the user
(see http://code.google.com/apis/accounts/AuthForInstalledApps.html).
3) We pass the auth token to /_ah/login on the server to obtain an
authentication cookie. If login was successful, it tries to redirect
us to the URL we provided.
If we attempt to access the upload API without first obtaining an
authentication cookie, it returns a 401 response (or a 302) and
directs us to authenticate ourselves with ClientLogin.
"""
for i in range(3):
credentials = self.auth_function()
try:
auth_token = self._GetAuthToken(credentials[0], credentials[1])
except ClientLoginError, e:
if e.reason == "BadAuthentication":
print >>sys.stderr, "Invalid username or password."
continue
if e.reason == "CaptchaRequired":
print >>sys.stderr, (
"Please go to\n"
"https://www.google.com/accounts/DisplayUnlockCaptcha\n"
"and verify you are a human. Then try again.\n"
"If you are using a Google Apps account the URL is:\n"
"https://www.google.com/a/yourdomain.com/UnlockCaptcha")
break
if e.reason == "NotVerified":
print >>sys.stderr, "Account not verified."
break
if e.reason == "TermsNotAgreed":
print >>sys.stderr, "User has not agreed to TOS."
break
if e.reason == "AccountDeleted":
print >>sys.stderr, "The user account has been deleted."
break
if e.reason == "AccountDisabled":
print >>sys.stderr, "The user account has been disabled."
break
if e.reason == "ServiceDisabled":
print >>sys.stderr, ("The user's access to the service has been "
"disabled.")
break
if e.reason == "ServiceUnavailable":
print >>sys.stderr, "The service is not available; try again later."
break
raise
self._GetAuthCookie(auth_token)
return
def Send(self, request_path, payload=None,
         content_type="application/octet-stream",
         timeout=None,
         extra_headers=None,
         **kwargs):
  """Sends an RPC and returns the response.

  Authenticates first if no cookie has been obtained yet, then retries the
  request up to three more times on 401/302 (re-authenticating) or 301
  (following the permanent redirect by rewriting self.host).

  Args:
    request_path: The path to send the request to, eg /api/appversion/create.
    payload: The body of the request, or None to send an empty request.
    content_type: The Content-Type header to use.
    timeout: timeout in seconds; default None i.e. no timeout.
      (Note: for large requests on OS X, the timeout doesn't work right.)
    extra_headers: Dict containing additional HTTP headers that should be
      included in the request (string header names mapped to their values),
      or None to not include any additional headers.
    kwargs: Any keyword arguments are converted into query string parameters.

  Returns:
    The response body, as a string.
  """
  # TODO: Don't require authentication.  Let the server say
  # whether it is necessary.
  if not self.authenticated:
    self._Authenticate()

  # The socket default timeout is process-global, so save and restore it
  # around this request.
  old_timeout = socket.getdefaulttimeout()
  socket.setdefaulttimeout(timeout)
  try:
    tries = 0
    while True:
      tries += 1
      args = dict(kwargs)
      url = "%s%s" % (self.host, request_path)
      if args:
        url += "?" + urllib.urlencode(args)
      req = self._CreateRequest(url=url, data=payload)
      req.add_header("Content-Type", content_type)
      if extra_headers:
        for header, value in extra_headers.items():
          req.add_header(header, value)
      try:
        f = self.opener.open(req)
        response = f.read()
        f.close()
        return response
      except urllib2.HTTPError, e:
        if tries > 3:
          raise
        elif e.code == 401 or e.code == 302:
          # Unauthenticated (or redirected to the login page): obtain a
          # fresh auth cookie and retry.
          self._Authenticate()
##        elif e.code >= 500 and e.code < 600:
##          # Server Error - try again.
##          continue
        elif e.code == 301:
          # Handle permanent redirect manually: point self.host at the new
          # location and let the loop re-issue the request.
          url = e.info()["location"]
          url_loc = urlparse.urlparse(url)
          self.host = '%s://%s' % (url_loc[0], url_loc[1])
        else:
          raise
  finally:
    socket.setdefaulttimeout(old_timeout)
class HttpRpcServer(AbstractRpcServer):
  """Provides a simplified RPC-style interface for HTTP requests."""

  def _Authenticate(self):
    """Save the cookie jar after authentication."""
    super(HttpRpcServer, self)._Authenticate()
    if self.save_cookies:
      StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
      self.cookie_jar.save()

  def _GetOpener(self):
    """Returns an OpenerDirector that supports cookies and ignores redirects.

    No HTTPRedirectHandler is registered, so redirects surface as HTTPError
    exceptions (which Send() relies on to detect 301/302 responses).

    Returns:
      A urllib2.OpenerDirector object.
    """
    opener = urllib2.OpenerDirector()
    opener.add_handler(urllib2.ProxyHandler())
    opener.add_handler(urllib2.UnknownHandler())
    opener.add_handler(urllib2.HTTPHandler())
    opener.add_handler(urllib2.HTTPDefaultErrorHandler())
    opener.add_handler(urllib2.HTTPSHandler())
    opener.add_handler(urllib2.HTTPErrorProcessor())
    if self.save_cookies:
      self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies")
      self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file)
      if os.path.exists(self.cookie_file):
        try:
          self.cookie_jar.load()
          # A loadable cookie file means we already have an auth cookie.
          self.authenticated = True
          StatusUpdate("Loaded authentication cookies from %s" %
                       self.cookie_file)
        except (cookielib.LoadError, IOError):
          # Failed to load cookies - just ignore them.
          pass
      else:
        # Create an empty cookie file with mode 600 (cookies are
        # credentials, so keep them owner-readable only).
        fd = os.open(self.cookie_file, os.O_CREAT, 0600)
        os.close(fd)
      # Always chmod the cookie file, in case it pre-existed with wider
      # permissions.
      os.chmod(self.cookie_file, 0600)
    else:
      # Don't save cookies across runs of update.py.
      self.cookie_jar = cookielib.CookieJar()
    opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar))
    return opener
# Module-level command-line parser; consumed by the main entry point.
parser = optparse.OptionParser(
    usage="%prog [options] [-- diff_options] [path...]")
parser.add_option("-y", "--assume_yes", action="store_true",
                  dest="assume_yes", default=False,
                  help="Assume that the answer to yes/no questions is 'yes'.")
# Logging
group = parser.add_option_group("Logging options")
group.add_option("-q", "--quiet", action="store_const", const=0,
                 dest="verbose", help="Print errors only.")
group.add_option("-v", "--verbose", action="store_const", const=2,
                 dest="verbose", default=1,
                 help="Print info level logs.")
group.add_option("--noisy", action="store_const", const=3,
                 dest="verbose", help="Print all logs.")
group.add_option("--print_diffs", dest="print_diffs", action="store_true",
                 help="Print full diffs.")
# Review server
group = parser.add_option_group("Review server options")
group.add_option("-s", "--server", action="store", dest="server",
                 default=DEFAULT_REVIEW_SERVER,
                 metavar="SERVER",
                 help=("The server to upload to. The format is host[:port]. "
                       "Defaults to '%default'."))
group.add_option("-e", "--email", action="store", dest="email",
                 metavar="EMAIL", default=None,
                 help="The username to use. Will prompt if omitted.")
group.add_option("-H", "--host", action="store", dest="host",
                 metavar="HOST", default=None,
                 help="Overrides the Host header sent with all RPCs.")
group.add_option("--no_cookies", action="store_false",
                 dest="save_cookies", default=True,
                 help="Do not save authentication cookies to local disk.")
group.add_option("--account_type", action="store", dest="account_type",
                 metavar="TYPE", default=AUTH_ACCOUNT_TYPE,
                 choices=["GOOGLE", "HOSTED"],
                 help=("Override the default account type "
                       "(defaults to '%default', "
                       "valid choices are 'GOOGLE' and 'HOSTED')."))
# Issue
group = parser.add_option_group("Issue options")
group.add_option("-d", "--description", action="store", dest="description",
                 metavar="DESCRIPTION", default=None,
                 help="Optional description when creating an issue.")
group.add_option("-f", "--description_file", action="store",
                 dest="description_file", metavar="DESCRIPTION_FILE",
                 default=None,
                 help="Optional path of a file that contains "
                      "the description when creating an issue.")
group.add_option("-r", "--reviewers", action="store", dest="reviewers",
                 metavar="REVIEWERS", default="jcgregorio@google.com",
                 help="Add reviewers (comma separated email addresses).")
group.add_option("--cc", action="store", dest="cc",
                 metavar="CC",
                 default="google-api-python-client@googlegroups.com",
                 help="Add CC (comma separated email addresses).")
group.add_option("--private", action="store_true", dest="private",
                 default=False,
                 help="Make the issue restricted to reviewers and those CCed")
# Upload options
group = parser.add_option_group("Patch options")
group.add_option("-m", "--message", action="store", dest="message",
                 metavar="MESSAGE", default=None,
                 help="A message to identify the patch. "
                      "Will prompt if omitted.")
group.add_option("-i", "--issue", type="int", action="store",
                 metavar="ISSUE", default=None,
                 help="Issue number to which to add. Defaults to new issue.")
group.add_option("--base_url", action="store", dest="base_url", default=None,
                 help="Base repository URL (listed as \"Base URL\" when "
                      "viewing issue). If omitted, will be guessed automatically "
                      "for SVN repos and left blank for others.")
group.add_option("--download_base", action="store_true",
                 dest="download_base", default=False,
                 help="Base files will be downloaded by the server "
                      "(side-by-side diffs may not work on files with CRs).")
group.add_option("--rev", action="store", dest="revision",
                 metavar="REV", default=None,
                 help="Base revision/branch/tree to diff against. Use "
                      "rev1:rev2 range to review already committed changeset.")
group.add_option("--send_mail", action="store_true",
                 dest="send_mail", default=False,
                 help="Send notification email to reviewers.")
group.add_option("--vcs", action="store", dest="vcs",
                 metavar="VCS", default=None,
                 help=("Version control system (optional, usually upload.py "
                       "already guesses the right VCS)."))
group.add_option("--emulate_svn_auto_props", action="store_true",
                 dest="emulate_svn_auto_props", default=False,
                 help=("Emulate Subversion's auto properties feature."))
# Perforce-specific
group = parser.add_option_group("Perforce-specific options "
                                "(overrides P4 environment variables)")
group.add_option("--p4_port", action="store", dest="p4_port",
                 metavar="P4_PORT", default=None,
                 help=("Perforce server and port (optional)"))
group.add_option("--p4_changelist", action="store", dest="p4_changelist",
                 metavar="P4_CHANGELIST", default=None,
                 help=("Perforce changelist id"))
group.add_option("--p4_client", action="store", dest="p4_client",
                 metavar="P4_CLIENT", default=None,
                 help=("Perforce client/workspace"))
group.add_option("--p4_user", action="store", dest="p4_user",
                 metavar="P4_USER", default=None,
                 help=("Perforce user"))
def GetRpcServer(server, email=None, host_override=None, save_cookies=True,
account_type=AUTH_ACCOUNT_TYPE):
"""Returns an instance of an AbstractRpcServer.
Args:
server: String containing the review server URL.
email: String containing user's email address.
host_override: If not None, string containing an alternate hostname to use
in the host header.
save_cookies: Whether authentication cookies should be saved to disk.
account_type: Account type for authentication, either 'GOOGLE'
or 'HOSTED'. Defaults to AUTH_ACCOUNT_TYPE.
Returns:
A new AbstractRpcServer, on which RPC calls can be made.
"""
rpc_server_class = HttpRpcServer
# If this is the dev_appserver, use fake authentication.
host = (host_override or server).lower()
if re.match(r'(http://)?localhost([:/]|$)', host):
if email is None:
email = "test@example.com"
logging.info("Using debug user %s. Override with --email" % email)
server = rpc_server_class(
server,
lambda: (email, "password"),
host_override=host_override,
extra_headers={"Cookie":
'dev_appserver_login="%s:False"' % email},
save_cookies=save_cookies,
account_type=account_type)
# Don't try to talk to ClientLogin.
server.authenticated = True
return server
def GetUserCredentials():
"""Prompts the user for a username and password."""
# Create a local alias to the email variable to avoid Python's crazy
# scoping rules.
local_email = email
if local_email is None:
local_email = GetEmail("Email (login for uploading to %s)" % server)
password = None
if keyring:
password = keyring.get_password(host, local_email)
if password is not None:
print "Using password from system keyring."
else:
password = getpass.getpass("Password for %s: " % local_email)
if keyring:
answer = raw_input("Store password in system keyring?(y/N) ").strip()
if answer == "y":
keyring.set_password(host, local_email, password)
return (local_email, password)
return rpc_server_class(server,
GetUserCredentials,
host_override=host_override,
save_cookies=save_cookies)
def EncodeMultipartFormData(fields, files):
  """Encode form fields for multipart/form-data.

  Args:
    fields: A sequence of (name, value) elements for regular form fields.
    files: A sequence of (name, filename, value) elements for data to be
      uploaded as files.

  Returns:
    (content_type, body) ready for httplib.HTTP instance.

  Source:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'

  def _ToBytes(value):
    # The request body must be a byte string; encode unicode values.
    if isinstance(value, unicode):
      return value.encode('utf-8')
    return value

  parts = []
  for (key, value) in fields:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"' % key,
        '',
        _ToBytes(value),
    ])
  for (key, filename, value) in files:
    parts.extend([
        '--' + BOUNDARY,
        'Content-Disposition: form-data; name="%s"; filename="%s"' %
        (key, filename),
        'Content-Type: %s' % GetContentType(filename),
        '',
        _ToBytes(value),
    ])
  # Closing boundary, then a trailing CRLF via the final empty element.
  parts.append('--' + BOUNDARY + '--')
  parts.append('')
  body = CRLF.join(parts)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body
def GetContentType(filename):
  """Helper to guess the content-type from the filename."""
  guessed = mimetypes.guess_type(filename)[0]
  if not guessed:
    # Unknown extension: fall back to the generic binary content type.
    return 'application/octet-stream'
  return guessed
# Use a shell for subcommands on Windows to get a PATH search.
# (Passed as the shell= argument to subprocess.Popen below.)
use_shell = sys.platform.startswith("win")
def RunShellWithReturnCode(command, print_output=False,
universal_newlines=True,
env=os.environ):
"""Executes a command and returns the output from stdout and the return code.
Args:
command: Command to execute.
print_output: If True, the output is printed to stdout.
If False, both stdout and stderr are ignored.
universal_newlines: Use universal_newlines flag (default: True).
Returns:
Tuple (output, return code)
"""
logging.info("Running %s", command)
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=use_shell, universal_newlines=universal_newlines,
env=env)
if print_output:
output_array = []
while True:
line = p.stdout.readline()
if not line:
break
print line.strip("\n")
output_array.append(line)
output = "".join(output_array)
else:
output = p.stdout.read()
p.wait()
errout = p.stderr.read()
if print_output and errout:
print >>sys.stderr, errout
p.stdout.close()
p.stderr.close()
return output, p.returncode
def RunShell(command, silent_ok=False, universal_newlines=True,
             print_output=False, env=None):
  """Executes a command, exiting via ErrorExit on failure.

  Args:
    command: Command to execute.
    silent_ok: If False (the default), empty output is treated as an error.
    universal_newlines: Use universal_newlines flag (default: True).
    print_output: If True, the output is printed to stdout as it runs.
    env: Environment mapping for the child process, or None to use the
      current process environment.

  Returns:
    The command's stdout output as a string.
  """
  data, retcode = RunShellWithReturnCode(command, print_output,
                                         universal_newlines, env)
  if retcode:
    ErrorExit("Got error status from %s:\n%s" % (command, data))
  if not silent_ok and not data:
    ErrorExit("No output from %s" % command)
  return data
class VersionControlSystem(object):
"""Abstract base class providing an interface to the VCS."""
def __init__(self, options):
"""Constructor.
Args:
options: Command line options.
"""
self.options = options
def PostProcessDiff(self, diff):
"""Return the diff with any special post processing this VCS needs, e.g.
to include an svn-style "Index:"."""
return diff
def GenerateDiff(self, args):
"""Return the current diff as a string.
Args:
args: Extra arguments to pass to the diff command.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetUnknownFiles(self):
"""Return a list of files unknown to the VCS."""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def CheckForUnknownFiles(self):
"""Show an "are you sure?" prompt if there are unknown files."""
unknown_files = self.GetUnknownFiles()
if unknown_files:
print "The following files are not added to version control:"
for line in unknown_files:
print line
prompt = "Are you sure to continue?(y/N) "
answer = raw_input(prompt).strip()
if answer != "y":
ErrorExit("User aborted")
def GetBaseFile(self, filename):
"""Get the content of the upstream version of a file.
Returns:
A tuple (base_content, new_content, is_binary, status)
base_content: The contents of the base file.
new_content: For text files, this is empty. For binary files, this is
the contents of the new file, since the diff output won't contain
information to reconstruct the current file.
is_binary: True iff the file is binary.
status: The status of the file.
"""
raise NotImplementedError(
"abstract method -- subclass %s must override" % self.__class__)
def GetBaseFiles(self, diff):
"""Helper that calls GetBase file for each file in the patch.
Returns:
A dictionary that maps from filename to GetBaseFile's tuple. Filenames
are retrieved based on lines that start with "Index:" or
"Property changes on:".
"""
files = {}
for line in diff.splitlines(True):
if line.startswith('Index:') or line.startswith('Property changes on:'):
unused, filename = line.split(':', 1)
# On Windows if a file has property changes its filename uses '\'
# instead of '/'.
filename = filename.strip().replace('\\', '/')
files[filename] = self.GetBaseFile(filename)
return files
def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options,
files):
"""Uploads the base files (and if necessary, the current ones as well)."""
def UploadFile(filename, file_id, content, is_binary, status, is_base):
"""Uploads a file to the server."""
file_too_large = False
if is_base:
type = "base"
else:
type = "current"
if len(content) > MAX_UPLOAD_SIZE:
print ("Not uploading the %s file for %s because it's too large." %
(type, filename))
file_too_large = True
content = ""
checksum = md5(content).hexdigest()
if options.verbose > 0 and not file_too_large:
print "Uploading %s file for %s" % (type, filename)
url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id)
form_fields = [("filename", filename),
("status", status),
("checksum", checksum),
("is_binary", str(is_binary)),
("is_current", str(not is_base)),
]
if file_too_large:
form_fields.append(("file_too_large", "1"))
if options.email:
form_fields.append(("user", options.email))
ctype, body = EncodeMultipartFormData(form_fields,
[("data", filename, content)])
response_body = rpc_server.Send(url, body,
content_type=ctype)
if not response_body.startswith("OK"):
StatusUpdate(" --> %s" % response_body)
sys.exit(1)
patches = dict()
[patches.setdefault(v, k) for k, v in patch_list]
for filename in patches.keys():
base_content, new_content, is_binary, status = files[filename]
file_id_str = patches.get(filename)
if file_id_str.find("nobase") != -1:
base_content = None
file_id_str = file_id_str[file_id_str.rfind("_") + 1:]
file_id = int(file_id_str)
if base_content != None:
UploadFile(filename, file_id, base_content, is_binary, status, True)
if new_content != None:
UploadFile(filename, file_id, new_content, is_binary, status, False)
def IsImage(self, filename):
"""Returns true if the filename has an image extension."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False
return mimetype.startswith("image/")
def IsBinary(self, filename):
"""Returns true if the guessed mimetyped isnt't in text group."""
mimetype = mimetypes.guess_type(filename)[0]
if not mimetype:
return False # e.g. README, "real" binaries usually have an extension
# special case for text files which don't start with text/
if mimetype in TEXT_MIMETYPES:
return False
return not mimetype.startswith("text/")
class SubversionVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Subversion."""

  def __init__(self, options):
    super(SubversionVCS, self).__init__(options)
    if self.options.revision:
      # Accept either a single revision "N" or a range "N:M".
      match = re.match(r"(\d+)(:(\d+))?", self.options.revision)
      if not match:
        ErrorExit("Invalid Subversion revision %s." % self.options.revision)
      self.rev_start = match.group(1)
      self.rev_end = match.group(3)
    else:
      self.rev_start = self.rev_end = None
    # Cache output from "svn list -r REVNO dirname".
    # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev).
    self.svnls_cache = {}
    # Base URL is required to fetch files deleted in an older revision.
    # Result is cached to not guess it over and over again in GetBaseFile().
    required = self.options.download_base or self.options.revision is not None
    self.svn_base = self._GuessBase(required)

  def GuessBase(self, required):
    """Wrapper for _GuessBase; returns the value cached by __init__."""
    return self.svn_base

  def _GuessBase(self, required):
    """Returns base URL for current diff.

    Args:
      required: If true, exits if the url can't be guessed, otherwise None is
        returned.
    """
    info = RunShell(["svn", "info"])
    for line in info.splitlines():
      if line.startswith("URL: "):
        url = line.split()[1]
        scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
        guess = ""
        # Rewrite known hosts to their public read-only HTTP mirrors.
        if netloc == "svn.python.org" and scheme == "svn+ssh":
          path = "projects" + path
          scheme = "http"
          guess = "Python "
        elif netloc.endswith(".googlecode.com"):
          scheme = "http"
          guess = "Google Code "
        path = path + "/"
        base = urlparse.urlunparse((scheme, netloc, path, params,
                                    query, fragment))
        logging.info("Guessed %sbase = %s", guess, base)
        return base
    if required:
      ErrorExit("Can't find URL in output from svn info")
    return None

  def GenerateDiff(self, args):
    """Returns "svn diff" output, exiting if no Index/Property lines found."""
    cmd = ["svn", "diff"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(args)
    data = RunShell(cmd)
    count = 0
    for line in data.splitlines():
      if line.startswith("Index:") or line.startswith("Property changes on:"):
        count += 1
        logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from svn diff")
    return data

  def _CollapseKeywords(self, content, keyword_str):
    """Collapses SVN keywords."""
    # svn cat translates keywords but svn diff doesn't. As a result of this
    # behavior patching.PatchChunks() fails with a chunk mismatch error.
    # This part was originally written by the Review Board development team
    # who had the same problem (http://reviews.review-board.org/r/276/).
    # Mapping of keywords to known aliases
    svn_keywords = {
      # Standard keywords
      'Date':                ['Date', 'LastChangedDate'],
      'Revision':            ['Revision', 'LastChangedRevision', 'Rev'],
      'Author':              ['Author', 'LastChangedBy'],
      'HeadURL':             ['HeadURL', 'URL'],
      'Id':                  ['Id'],

      # Aliases
      'LastChangedDate':     ['LastChangedDate', 'Date'],
      'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'],
      'LastChangedBy':       ['LastChangedBy', 'Author'],
      'URL':                 ['URL', 'HeadURL'],
    }

    def repl(m):
      # Replace an expanded keyword with its collapsed form, preserving the
      # overall width when the "::" fixed-length form was used.
      if m.group(2):
        return "$%s::%s$" % (m.group(1), " " * len(m.group(3)))
      return "$%s$" % m.group(1)

    keywords = [keyword
                for name in keyword_str.split(" ")
                for keyword in svn_keywords.get(name, [])]
    return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content)

  def GetUnknownFiles(self):
    """Returns lines svn status marks with "?" (unversioned files)."""
    status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files

  def ReadFile(self, filename):
    """Returns the contents of a file."""
    file = open(filename, 'rb')
    result = ""
    try:
      result = file.read()
    finally:
      file.close()
    return result

  def GetStatus(self, filename):
    """Returns the status of a file."""
    if not self.options.revision:
      status = RunShell(["svn", "status", "--ignore-externals", filename])
      if not status:
        ErrorExit("svn status returned no output for %s" % filename)
      status_lines = status.splitlines()
      # If file is in a cl, the output will begin with
      # "\n--- Changelist 'cl_name':\n". See
      # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt
      if (len(status_lines) == 3 and
          not status_lines[0] and
          status_lines[1].startswith("--- Changelist")):
        status = status_lines[2]
      else:
        status = status_lines[0]
    else:
      # If we have a revision to diff against we need to run "svn list"
      # for the old and the new revision and compare the results to get
      # the correct status for a file.
      dirname, relfilename = os.path.split(filename)
      if dirname not in self.svnls_cache:
        cmd = ["svn", "list", "-r", self.rev_start, dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to get status for %s." % filename)
        old_files = out.splitlines()
        args = ["svn", "list"]
        if self.rev_end:
          args += ["-r", self.rev_end]
        cmd = args + [dirname or "."]
        out, returncode = RunShellWithReturnCode(cmd)
        if returncode:
          ErrorExit("Failed to run command %s" % cmd)
        self.svnls_cache[dirname] = (old_files, out.splitlines())
      old_files, new_files = self.svnls_cache[dirname]
      # Present only in the old listing -> deleted; in both -> modified;
      # only in the new listing -> added.
      if relfilename in old_files and relfilename not in new_files:
        status = "D   "
      elif relfilename in old_files and relfilename in new_files:
        status = "M   "
      else:
        status = "A   "
    return status

  def GetBaseFile(self, filename):
    """Returns the (base_content, new_content, is_binary, status) tuple.

    See VersionControlSystem.GetBaseFile for the meaning of each element.
    """
    status = self.GetStatus(filename)
    base_content = None
    new_content = None

    # If a file is copied its status will be "A  +", which signifies
    # "addition-with-history". See "svn st" for more information. We need to
    # upload the original file or else diff parsing will fail if the file was
    # edited.
    if status[0] == "A" and status[3] != "+":
      # We'll need to upload the new content if we're adding a binary file
      # since diff's output won't contain it.
      mimetype = RunShell(["svn", "propget", "svn:mime-type", filename],
                          silent_ok=True)
      base_content = ""
      is_binary = bool(mimetype) and not mimetype.startswith("text/")
      if is_binary and self.IsImage(filename):
        new_content = self.ReadFile(filename)
    elif (status[0] in ("M", "D", "R") or
          (status[0] == "A" and status[3] == "+") or  # Copied file.
          (status[0] == " " and status[1] == "M")):  # Property change.
      args = []
      if self.options.revision:
        url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
      else:
        # Don't change filename, it's needed later.
        url = filename
        args += ["-r", "BASE"]
      cmd = ["svn"] + args + ["propget", "svn:mime-type", url]
      mimetype, returncode = RunShellWithReturnCode(cmd)
      if returncode:
        # File does not exist in the requested revision.
        # Reset mimetype, it contains an error message.
        mimetype = ""
      else:
        mimetype = mimetype.strip()
      get_base = False
      is_binary = (bool(mimetype) and
        not mimetype.startswith("text/") and
        not mimetype in TEXT_MIMETYPES)
      if status[0] == " ":
        # Empty base content just to force an upload.
        base_content = ""
      elif is_binary:
        if self.IsImage(filename):
          get_base = True
          if status[0] == "M":
            if not self.rev_end:
              new_content = self.ReadFile(filename)
            else:
              url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end)
              new_content = RunShell(["svn", "cat", url],
                                     universal_newlines=True, silent_ok=True)
        else:
          base_content = ""
      else:
        get_base = True

      if get_base:
        # Binary content must be fetched raw; text goes through newline
        # translation so keyword collapsing below works consistently.
        if is_binary:
          universal_newlines = False
        else:
          universal_newlines = True
        if self.rev_start:
          # "svn cat -r REV delete_file.txt" doesn't work. cat requires
          # the full URL with "@REV" appended instead of using "-r" option.
          url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          base_content = RunShell(["svn", "cat", url],
                                  universal_newlines=universal_newlines,
                                  silent_ok=True)
        else:
          base_content, ret_code = RunShellWithReturnCode(
            ["svn", "cat", filename], universal_newlines=universal_newlines)
          if ret_code and status[0] == "R":
            # It's a replaced file without local history (see issue208).
            # The base file needs to be fetched from the server.
            url = "%s/%s" % (self.svn_base, filename)
            base_content = RunShell(["svn", "cat", url],
                                    universal_newlines=universal_newlines,
                                    silent_ok=True)
          elif ret_code:
            ErrorExit("Got error status from 'svn cat %s'" % filename)
        if not is_binary:
          args = []
          if self.rev_start:
            url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start)
          else:
            url = filename
            args += ["-r", "BASE"]
          cmd = ["svn"] + args + ["propget", "svn:keywords", url]
          keywords, returncode = RunShellWithReturnCode(cmd)
          if keywords and not returncode:
            base_content = self._CollapseKeywords(base_content, keywords)
    else:
      StatusUpdate("svn status returned unexpected output: %s" % status)
      sys.exit(1)
    return base_content, new_content, is_binary, status[0:5]
class GitVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Git."""

  def __init__(self, options):
    super(GitVCS, self).__init__(options)
    # Map of filename -> (hash before, hash after) of base file.
    # Hashes for "no such file" are represented as None.
    self.hashes = {}
    # Map of new filename -> old filename for renames.
    self.renames = {}

  def PostProcessDiff(self, gitdiff):
    """Converts the diff output to include an svn-style "Index:" line as well
    as record the hashes of the files, so we can upload them along with our
    diff."""
    # Special used by git to indicate "no such content".
    NULL_HASH = "0"*40

    def IsFileNew(filename):
      # A file is new when its recorded pre-image hash is None.
      return filename in self.hashes and self.hashes[filename][0] is None

    def AddSubversionPropertyChange(filename):
      """Add svn's property change information into the patch if given file is
      new file.

      We use Subversion's auto-props setting to retrieve its property.
      See http://svnbook.red-bean.com/en/1.1/ch07.html#svn-ch-7-sect-1.3.2 for
      Subversion's [auto-props] setting.
      """
      if self.options.emulate_svn_auto_props and IsFileNew(filename):
        svnprops = GetSubversionPropertyChanges(filename)
        if svnprops:
          svndiff.append("\n" + svnprops + "\n")

    svndiff = []
    filecount = 0
    filename = None
    for line in gitdiff.splitlines():
      match = re.match(r"diff --git a/(.*) b/(.*)$", line)
      if match:
        # Add auto property here for previously seen file.
        if filename is not None:
          AddSubversionPropertyChange(filename)
        filecount += 1
        # Intentionally use the "after" filename so we can show renames.
        filename = match.group(2)
        svndiff.append("Index: %s\n" % filename)
        if match.group(1) != match.group(2):
          self.renames[match.group(2)] = match.group(1)
      else:
        # The "index" line in a git diff looks like this (long hashes elided):
        #   index 82c0d44..b2cee3f 100755
        # We want to save the left hash, as that identifies the base file.
        match = re.match(r"index (\w+)\.\.(\w+)", line)
        if match:
          before, after = (match.group(1), match.group(2))
          if before == NULL_HASH:
            before = None
          if after == NULL_HASH:
            after = None
          self.hashes[filename] = (before, after)
      # Every original diff line is kept in the output.
      svndiff.append(line + "\n")
    if not filecount:
      ErrorExit("No valid patches found in output from git diff")
    # Add auto property for the last seen file.
    assert filename is not None
    AddSubversionPropertyChange(filename)
    return "".join(svndiff)

  def GenerateDiff(self, extra_args):
    """Returns "git diff" output against the configured revision (or range)."""
    extra_args = extra_args[:]
    if self.options.revision:
      if ":" in self.options.revision:
        extra_args = self.options.revision.split(":", 1) + extra_args
      else:
        extra_args = [self.options.revision] + extra_args

    # --no-ext-diff is broken in some versions of Git, so try to work around
    # this by overriding the environment (but there is still a problem if the
    # git config key "diff.external" is used).
    env = os.environ.copy()
    if 'GIT_EXTERNAL_DIFF' in env: del env['GIT_EXTERNAL_DIFF']
    return RunShell(["git", "diff", "--no-ext-diff", "--full-index", "-M"]
                    + extra_args, env=env)

  def GetUnknownFiles(self):
    """Returns paths git reports as untracked (exclude-standard applied)."""
    status = RunShell(["git", "ls-files", "--exclude-standard", "--others"],
                      silent_ok=True)
    return status.splitlines()

  def GetFileContent(self, file_hash, is_binary):
    """Returns the content of a file identified by its git hash."""
    data, retcode = RunShellWithReturnCode(["git", "show", file_hash],
                                            universal_newlines=not is_binary)
    if retcode:
      ErrorExit("Got error status from 'git show %s'" % file_hash)
    return data

  def GetBaseFile(self, filename):
    """Returns the (base_content, new_content, is_binary, status) tuple.

    See VersionControlSystem.GetBaseFile for the meaning of each element.
    """
    hash_before, hash_after = self.hashes.get(filename, (None,None))
    base_content = None
    new_content = None
    is_binary = self.IsBinary(filename)
    status = None

    if filename in self.renames:
      status = "A +"  # Match svn attribute name for renames.
      if filename not in self.hashes:
        # If a rename doesn't change the content, we never get a hash.
        base_content = RunShell(["git", "show", "HEAD:" + filename])
    elif not hash_before:
      status = "A"
      base_content = ""
    elif not hash_after:
      status = "D"
    else:
      status = "M"

    is_image = self.IsImage(filename)

    # Grab the before/after content if we need it.
    # We should include file contents if it's text or it's an image.
    if not is_binary or is_image:
      # Grab the base content if we don't have it already.
      if base_content is None and hash_before:
        base_content = self.GetFileContent(hash_before, is_binary)
      # Only include the "after" file if it's an image; otherwise it
      # is reconstructed from the diff.
      if is_image and hash_after:
        new_content = self.GetFileContent(hash_after, is_binary)

    return (base_content, new_content, is_binary, status)
class CVSVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for CVS."""

  def __init__(self, options):
    super(CVSVCS, self).__init__(options)

  def GetOriginalContent_(self, filename):
    """Fetches the pristine repository content of filename via "cvs up"."""
    RunShell(["cvs", "up", filename], silent_ok=True)
    # TODO need detect file content encoding
    content = open(filename).read()
    # Normalize Windows line endings so diffs line up.
    return content.replace("\r\n", "\n")

  def GetBaseFile(self, filename):
    """Returns the (base_content, new_content, is_binary, status) tuple.

    See VersionControlSystem.GetBaseFile for the meaning of each element.
    """
    base_content = None
    new_content = None
    is_binary = False
    status = "A"

    output, retcode = RunShellWithReturnCode(["cvs", "status", filename])
    if retcode:
      ErrorExit("Got error status from 'cvs status %s'" % filename)

    # BUG FIX: str.find returns -1 (truthy) when the substring is absent, so
    # the previous bare "elif output.find(...):" conditions always succeeded;
    # deleted ("Needs Checkout") files were misclassified as locally added.
    # Compare explicitly against -1.
    if output.find("Status: Locally Modified") != -1:
      status = "M"
      # Move the modified file aside so "cvs up" restores the base content,
      # then put the working copy back.
      temp_filename = "%s.tmp123" % filename
      os.rename(filename, temp_filename)
      base_content = self.GetOriginalContent_(filename)
      os.rename(temp_filename, filename)
    elif output.find("Status: Locally Added") != -1:
      status = "A"
      base_content = ""
    elif output.find("Status: Needs Checkout") != -1:
      status = "D"
      base_content = self.GetOriginalContent_(filename)

    return (base_content, new_content, is_binary, status)

  def GenerateDiff(self, extra_args):
    """Returns "cvs diff -u -N" output, exiting if no patches are found."""
    cmd = ["cvs", "diff", "-u", "-N"]
    if self.options.revision:
      cmd += ["-r", self.options.revision]
    cmd.extend(extra_args)
    data, retcode = RunShellWithReturnCode(cmd)
    count = 0
    # NOTE(review): cvs diff exits non-zero when differences exist; only
    # retcode == 0 output is scanned here -- confirm this matches the cvs
    # versions in use.
    if retcode == 0:
      for line in data.splitlines():
        if line.startswith("Index:"):
          count += 1
          logging.info(line)
    if not count:
      ErrorExit("No valid patches found in output from cvs diff")
    return data

  def GetUnknownFiles(self):
    """Returns lines cvs marks with "?" (files unknown to CVS)."""
    status = RunShell(["cvs", "diff"],
                      silent_ok=True)
    unknown_files = []
    for line in status.split("\n"):
      if line and line[0] == "?":
        unknown_files.append(line)
    return unknown_files
class MercurialVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Mercurial."""

  def __init__(self, options, repo_dir):
    super(MercurialVCS, self).__init__(options)
    # Absolute path to repository (we can be in a subdir)
    self.repo_dir = os.path.normpath(repo_dir)
    # Compute the subdir
    cwd = os.path.normpath(os.getcwd())
    assert cwd.startswith(self.repo_dir)
    self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/")
    if self.options.revision:
      self.base_rev = self.options.revision
    else:
      # "hg parent -q" prints something like "1234:abcdef12"; keep the part
      # after the ':' (presumably the short changeset id -- confirm).
      self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip()

  def _GetRelPath(self, filename):
    """Get relative path of a file according to the current directory,
    given its logical path in the repo."""
    assert filename.startswith(self.subdir), (filename, self.subdir)
    return filename[len(self.subdir):].lstrip(r"\/")

  def GenerateDiff(self, extra_args):
    # Produce a diff in svn-like format so the Rietveld server needs no
    # Mercurial-specific handling.
    cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args
    data = RunShell(cmd, silent_ok=True)
    svndiff = []
    filecount = 0
    for line in data.splitlines():
      m = re.match("diff --git a/(\S+) b/(\S+)", line)
      if m:
        # Modify line to make it look like as it comes from svn diff.
        # With this modification no changes on the server side are required
        # to make upload.py work with Mercurial repos.
        # NOTE: for proper handling of moved/copied files, we have to use
        # the second filename.
        filename = m.group(2)
        svndiff.append("Index: %s" % filename)
        svndiff.append("=" * 67)
        filecount += 1
        logging.info(line)
      else:
        svndiff.append(line)
    if not filecount:
      ErrorExit("No valid patches found in output from hg diff")
    return "\n".join(svndiff) + "\n"

  def GetUnknownFiles(self):
    """Return a list of files unknown to the VCS."""
    args = []
    status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."],
        silent_ok=True)
    unknown_files = []
    for line in status.splitlines():
      # Each status line is "<st> <filename>"; "?" marks untracked files.
      st, fn = line.split(" ", 1)
      if st == "?":
        unknown_files.append(fn)
    return unknown_files

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|."""
    # "hg status" and "hg cat" both take a path relative to the current subdir
    # rather than to the repo root, but "hg diff" has given us the full path
    # to the repo root.
    base_content = ""
    new_content = None
    is_binary = False
    oldrelpath = relpath = self._GetRelPath(filename)
    # "hg status -C" returns two lines for moved/copied files, one otherwise
    out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath])
    out = out.splitlines()
    # HACK: strip error message about missing file/directory if it isn't in
    # the working copy
    if out[0].startswith('%s: ' % relpath):
      out = out[1:]
    status, _ = out[0].split(' ', 1)
    if len(out) > 1 and status == "A":
      # Moved/copied => considered as modified, use old filename to
      # retrieve base contents
      oldrelpath = out[1].strip()
      status = "M"
    if ":" in self.base_rev:
      # A revision range was given; diff against its left endpoint.
      base_rev = self.base_rev.split(":", 1)[0]
    else:
      base_rev = self.base_rev
    if status != "A":
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
        silent_ok=True)
      is_binary = "\0" in base_content  # Mercurial's heuristic
    if status != "R":
      new_content = open(relpath, "rb").read()
      is_binary = is_binary or "\0" in new_content
    if is_binary and base_content:
      # Fetch again without converting newlines
      base_content = RunShell(["hg", "cat", "-r", base_rev, oldrelpath],
        silent_ok=True, universal_newlines=False)
    if not is_binary or not self.IsImage(relpath):
      # The server reconstructs the new file from the diff unless it's a
      # binary image that must be uploaded whole.
      new_content = None
    return base_content, new_content, is_binary, status
class PerforceVCS(VersionControlSystem):
  """Implementation of the VersionControlSystem interface for Perforce."""

  def __init__(self, options):
    def ConfirmLogin():
      # Make sure we have a valid perforce session
      while True:
        data, retcode = self.RunPerforceCommandWithReturnCode(
            ["login", "-s"], marshal_output=True)
        if not data:
          ErrorExit("Error checking perforce login")
        if not retcode and (not "code" in data or data["code"] != "error"):
          break
        # Session invalid/expired: prompt for a fresh "p4 login".
        print "Enter perforce password: "
        self.RunPerforceCommandWithReturnCode(["login"])

    super(PerforceVCS, self).__init__(options)

    self.p4_changelist = options.p4_changelist
    if not self.p4_changelist:
      ErrorExit("A changelist id is required")
    if (options.revision):
      ErrorExit("--rev is not supported for perforce")

    self.p4_port = options.p4_port
    self.p4_client = options.p4_client
    self.p4_user = options.p4_user

    ConfirmLogin()

    if not options.message:
      # Default the Rietveld message to the changelist description's first
      # line.
      description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                            marshal_output=True)
      if description and "desc" in description:
        # Rietveld doesn't support multi-line descriptions
        raw_message = description["desc"].strip()
        lines = raw_message.splitlines()
        if len(lines):
          options.message = lines[0]

  def RunPerforceCommandWithReturnCode(self, extra_args, marshal_output=False,
                                       universal_newlines=True):
    """Runs "p4 <extra_args>" with connection flags; returns (data, retcode).

    When marshal_output is true, data is the unmarshalled python object
    p4 -G emits; otherwise it is raw command output.
    """
    args = ["p4"]
    if marshal_output:
      # -G makes perforce format its output as marshalled python objects
      args.extend(["-G"])
    if self.p4_port:
      args.extend(["-p", self.p4_port])
    if self.p4_client:
      args.extend(["-c", self.p4_client])
    if self.p4_user:
      args.extend(["-u", self.p4_user])
    args.extend(extra_args)

    data, retcode = RunShellWithReturnCode(
        args, print_output=False, universal_newlines=universal_newlines)
    if marshal_output and data:
      data = marshal.loads(data)
    return data, retcode

  def RunPerforceCommand(self, extra_args, marshal_output=False,
                         universal_newlines=True):
    """Like RunPerforceCommandWithReturnCode but exits on error."""
    # This might be a good place to cache call results, since things like
    # describe or fstat might get called repeatedly.
    data, retcode = self.RunPerforceCommandWithReturnCode(
        extra_args, marshal_output, universal_newlines)
    if retcode:
      ErrorExit("Got error status from %s:\n%s" % (extra_args, data))
    return data

  def GetFileProperties(self, property_key_prefix = "", command = "describe"):
    """Maps depot filename -> property value for the changelist's files.

    property_key_prefix selects which per-file property is read (e.g.
    "action" or "type"); keys in the marshalled describe output are
    "<prefix><index>" parallel to "depotFile<index>".
    """
    description = self.RunPerforceCommand(["describe", self.p4_changelist],
                                          marshal_output=True)
    changed_files = {}
    file_index = 0
    # Try depotFile0, depotFile1, ... until we don't find a match
    while True:
      file_key = "depotFile%d" % file_index
      if file_key in description:
        filename = description[file_key]
        change_type = description[property_key_prefix + str(file_index)]
        changed_files[filename] = change_type
        file_index += 1
      else:
        break
    return changed_files

  def GetChangedFiles(self):
    """Maps depot filename -> p4 action ("edit", "add", ...)."""
    return self.GetFileProperties("action")

  def GetUnknownFiles(self):
    # Perforce doesn't detect new files, they have to be explicitly added
    return []

  def IsBaseBinary(self, filename):
    """True if the base (depot) revision of |filename| is a binary type."""
    base_filename = self.GetBaseFilename(filename)
    return self.IsBinaryHelper(base_filename, "files")

  def IsPendingBinary(self, filename):
    """True if the pending (changelist) revision of |filename| is binary."""
    return self.IsBinaryHelper(filename, "describe")

  def IsBinary(self, filename):
    # Deliberately unsupported: the base and pending revisions can have
    # different types, so callers must pick one explicitly.
    ErrorExit("IsBinary is not safe: call IsBaseBinary or IsPendingBinary")

  def IsBinaryHelper(self, filename, command):
    file_types = self.GetFileProperties("type", command)
    if not filename in file_types:
      ErrorExit("Trying to check binary status of unknown file %s." % filename)
    # This treats symlinks, macintosh resource files, temporary objects, and
    # unicode as binary. See the Perforce docs for more details:
    # http://www.perforce.com/perforce/doc.current/manuals/cmdref/o.ftypes.html
    return not file_types[filename].endswith("text")

  def GetFileContent(self, filename, revision, is_binary):
    """Returns the depot content of filename (optionally at #revision)."""
    file_arg = filename
    if revision:
      file_arg += "#" + revision
    # -q suppresses the initial line that displays the filename and revision
    return self.RunPerforceCommand(["print", "-q", file_arg],
                                   universal_newlines=not is_binary)

  def GetBaseFilename(self, filename):
    """Returns the depot file the pending change was branched/moved from,
    or |filename| itself for ordinary edits."""
    actionsWithDifferentBases = [
        "move/add",  # p4 move
        "branch",  # p4 integrate (to a new file), similar to hg "add"
        "add",  # p4 integrate (to a new file), after modifying the new file
    ]

    # We only see a different base for "add" if this is a downgraded branch
    # after a file was branched (integrated), then edited.
    if self.GetAction(filename) in actionsWithDifferentBases:
      # -Or shows information about pending integrations/moves
      fstat_result = self.RunPerforceCommand(["fstat", "-Or", filename],
                                             marshal_output=True)

      baseFileKey = "resolveFromFile0"  # I think it's safe to use only file0
      if baseFileKey in fstat_result:
        return fstat_result[baseFileKey]

    return filename

  def GetBaseRevision(self, filename):
    """Returns the locally synced revision number of |filename|, or None."""
    base_filename = self.GetBaseFilename(filename)

    have_result = self.RunPerforceCommand(["have", base_filename],
                                          marshal_output=True)
    if "haveRev" in have_result:
      return have_result["haveRev"]

  def GetLocalFilename(self, filename):
    """Maps a depot path to the client workspace path, or None."""
    where = self.RunPerforceCommand(["where", filename], marshal_output=True)
    if "path" in where:
      return where["path"]

  def GenerateDiff(self, args):
    """Builds an svn-style unified diff for every file in the changelist."""

    class DiffData:
      # Accumulates everything needed to emit one file's diff section.
      def __init__(self, perforceVCS, filename, action):
        self.perforceVCS = perforceVCS
        self.filename = filename
        self.action = action
        self.base_filename = perforceVCS.GetBaseFilename(filename)

        self.file_body = None
        self.base_rev = None
        self.prefix = None
        self.working_copy = True
        self.change_summary = None

    def GenerateDiffHeader(diffData):
      # Emits the svn-style "Index:/---/+++" preamble for one file.
      header = []
      header.append("Index: %s" % diffData.filename)
      header.append("=" * 67)

      if diffData.base_filename != diffData.filename:
        if diffData.action.startswith("move"):
          verb = "rename"
        else:
          verb = "copy"
        header.append("%s from %s" % (verb, diffData.base_filename))
        header.append("%s to %s" % (verb, diffData.filename))

      suffix = "\t(revision %s)" % diffData.base_rev
      header.append("--- " + diffData.base_filename + suffix)
      if diffData.working_copy:
        suffix = "\t(working copy)"
      header.append("+++ " + diffData.filename + suffix)
      if diffData.change_summary:
        header.append(diffData.change_summary)
      return header

    def GenerateMergeDiff(diffData, args):
      # For edited files: use p4's own unified diff, minus its preamble.
      # -du generates a unified diff, which is nearly svn format
      diffData.file_body = self.RunPerforceCommand(
          ["diff", "-du", diffData.filename] + args)
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      diffData.prefix = ""

      # We have to replace p4's file status output (the lines starting
      # with +++ or ---) to match svn's diff format
      lines = diffData.file_body.splitlines()
      first_good_line = 0
      while (first_good_line < len(lines) and
             not lines[first_good_line].startswith("@@")):
        first_good_line += 1
      diffData.file_body = "\n".join(lines[first_good_line:])
      return diffData

    def GenerateAddDiff(diffData):
      # For added files: synthesize a diff adding every line of the file.
      fstat = self.RunPerforceCommand(["fstat", diffData.filename],
                                      marshal_output=True)
      if "headRev" in fstat:
        diffData.base_rev = fstat["headRev"]  # Re-adding a deleted file
      else:
        diffData.base_rev = "0"  # Brand new file
      diffData.working_copy = False
      rel_path = self.GetLocalFilename(diffData.filename)
      diffData.file_body = open(rel_path, 'r').read()
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -0,0 +1"
      if line_count > 1:
          diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " @@"
      diffData.prefix = "+"
      return diffData

    def GenerateDeleteDiff(diffData):
      # For deleted files: synthesize a diff removing every line.
      diffData.base_rev = self.GetBaseRevision(diffData.filename)
      is_base_binary = self.IsBaseBinary(diffData.filename)

      # For deletes, base_filename == filename
      diffData.file_body = self.GetFileContent(diffData.base_filename,
                                               None,
                                               is_base_binary)
      # Replicate svn's list of changed lines
      line_count = len(diffData.file_body.splitlines())
      diffData.change_summary = "@@ -1"
      if line_count > 1:
        diffData.change_summary += ",%d" % line_count
      diffData.change_summary += " +0,0 @@"
      diffData.prefix = "-"
      return diffData

    changed_files = self.GetChangedFiles()

    svndiff = []
    filecount = 0
    for (filename, action) in changed_files.items():
      svn_status = self.PerforceActionToSvnStatus(action)
      if svn_status == "SKIP":
        continue

      diffData = DiffData(self, filename, action)
      # Is it possible to diff a branched file? Stackoverflow says no:
      # http://stackoverflow.com/questions/1771314/in-perforce-command-line-how-to-diff-a-file-reopened-for-add
      if svn_status == "M":
        diffData = GenerateMergeDiff(diffData, args)
      elif svn_status == "A":
        diffData = GenerateAddDiff(diffData)
      elif svn_status == "D":
        diffData = GenerateDeleteDiff(diffData)
      else:
        ErrorExit("Unknown file action %s (svn action %s)." % \
                  (action, svn_status))

      svndiff += GenerateDiffHeader(diffData)

      for line in diffData.file_body.splitlines():
        svndiff.append(diffData.prefix + line)
      filecount += 1
    if not filecount:
      ErrorExit("No valid patches found in output from p4 diff")
    return "\n".join(svndiff) + "\n"

  def PerforceActionToSvnStatus(self, status):
    """Maps a p4 action to an svn-like status letter ("A"/"M"/"D"/"SKIP")."""
    # Mirroring the list at http://permalink.gmane.org/gmane.comp.version-control.mercurial.devel/28717
    # Is there something more official?
    return {
            "add" : "A",
            "branch" : "A",
            "delete" : "D",
            "edit" : "M",  # Also includes changing file types.
            "integrate" : "M",
            "move/add" : "M",
            "move/delete": "SKIP",
            "purge" : "D",  # How does a file's status become "purge"?
           }[status]

  def GetAction(self, filename):
    """Returns the p4 action for |filename|; exits if it isn't in the CL."""
    changed_files = self.GetChangedFiles()
    if not filename in changed_files:
      ErrorExit("Trying to get base version of unknown file %s." % filename)

    return changed_files[filename]

  def GetBaseFile(self, filename):
    """Returns (base_content, new_content, is_binary, status) for |filename|."""
    base_filename = self.GetBaseFilename(filename)
    base_content = ""
    new_content = None

    status = self.PerforceActionToSvnStatus(self.GetAction(filename))

    if status != "A":
      revision = self.GetBaseRevision(base_filename)
      if not revision:
        ErrorExit("Couldn't find base revision for file %s" % filename)
      is_base_binary = self.IsBaseBinary(base_filename)
      base_content = self.GetFileContent(base_filename,
                                         revision,
                                         is_base_binary)

    is_binary = self.IsPendingBinary(filename)
    if status != "D" and status != "SKIP":
      relpath = self.GetLocalFilename(filename)
      if is_binary and self.IsImage(relpath):
        # Only binary images are uploaded whole; everything else is
        # reconstructed from the diff on the server.
        new_content = open(relpath, "rb").read()

    return base_content, new_content, is_binary, status
# NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync.
def SplitPatch(data):
  """Splits a patch into separate pieces for each file.

  Args:
    data: A string containing the output of svn diff.

  Returns:
    A list of 2-tuple (filename, text) where text is the svn diff output
    pertaining to filename.
  """
  pieces = []
  current_name = None
  current_lines = []
  for line in data.splitlines(True):
    boundary = None
    if line.startswith('Index:'):
      boundary = line.split(':', 1)[1].strip()
    elif line.startswith('Property changes on:'):
      # When a file is modified, paths use '/' between directories, however
      # when a property is modified '\' is used on Windows. Normalize so the
      # same file doesn't show up twice.
      candidate = line.split(':', 1)[1].strip().replace('\\', '/')
      if candidate != current_name:
        # File has property changes but no modifications: start a new diff.
        boundary = candidate
    if boundary:
      # Flush the previous file's section before starting a new one.
      if current_name and current_lines:
        pieces.append((current_name, ''.join(current_lines)))
      current_name = boundary
      current_lines = [line]
      continue
    current_lines.append(line)
  if current_name and current_lines:
    pieces.append((current_name, ''.join(current_lines)))
  return pieces
def UploadSeparatePatches(issue, rpc_server, patchset, data, options):
  """Uploads a separate patch for each file in the diff output.

  Args:
    issue: The issue id the patches belong to.
    rpc_server: Server proxy used to POST each patch.
    patchset: The patchset id within the issue.
    data: Full diff text; split per-file with SplitPatch.
    options: Parsed command-line options (download_base is consulted).

  Returns a list of [patch_key, filename] for each file.
  """
  patches = SplitPatch(data)
  rv = []
  for patch in patches:
    # Skip (but report) files whose diff exceeds the server's upload limit.
    if len(patch[1]) > MAX_UPLOAD_SIZE:
      print ("Not uploading the patch for " + patch[0] +
             " because the file is too large.")
      continue
    form_fields = [("filename", patch[0])]
    if not options.download_base:
      form_fields.append(("content_upload", "1"))
    files = [("data", "data.diff", patch[1])]
    ctype, body = EncodeMultipartFormData(form_fields, files)
    url = "/%d/upload_patch/%d" % (int(issue), int(patchset))
    print "Uploading patch for " + patch[0]
    response_body = rpc_server.Send(url, body, content_type=ctype)
    lines = response_body.splitlines()
    # On success the server replies "OK" followed by the patch key.
    if not lines or lines[0] != "OK":
      StatusUpdate(" --> %s" % response_body)
      sys.exit(1)
    rv.append([lines[1], patch[0]])
  return rv
def GuessVCSName(options):
  """Helper to guess the version control system.

  This examines the current directory, guesses which VersionControlSystem
  we're using, and returns an string indicating which VCS is detected.

  Returns:
    A pair (vcs, output).  vcs is a string indicating which VCS was detected
    and is one of VCS_GIT, VCS_MERCURIAL, VCS_SUBVERSION, VCS_PERFORCE,
    VCS_CVS, or VCS_UNKNOWN.
    Since local perforce repositories can't be easily detected, this method
    will only guess VCS_PERFORCE if any perforce options have been specified.
    output is a string containing any interesting output from the vcs
    detection routine, or None if there is nothing interesting.
  """
  # Any explicit p4_* command-line option implies Perforce, since a p4
  # client can't be detected from the filesystem alone.
  for attribute, value in options.__dict__.iteritems():
    if attribute.startswith("p4") and value != None:
      return (VCS_PERFORCE, None)

  # Mercurial has a command to get the base directory of a repository
  # Try running it, but don't die if we don't have hg installed.
  # NOTE: we try Mercurial first as it can sit on top of an SVN working copy.
  try:
    out, returncode = RunShellWithReturnCode(["hg", "root"])
    if returncode == 0:
      return (VCS_MERCURIAL, out.strip())
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have hg installed.
      raise

  # Subversion has a .svn in all working directories.
  if os.path.isdir('.svn'):
    logging.info("Guessed VCS = Subversion")
    return (VCS_SUBVERSION, None)

  # Git has a command to test if you're in a git tree.
  # Try running it, but don't die if we don't have git installed.
  try:
    out, returncode = RunShellWithReturnCode(["git", "rev-parse",
                                              "--is-inside-work-tree"])
    if returncode == 0:
      return (VCS_GIT, None)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- they don't have git installed.
      raise

  # detect CVS repos use `cvs status && $? == 0` rules
  try:
    out, returncode = RunShellWithReturnCode(["cvs", "status"])
    if returncode == 0:
      return (VCS_CVS, None)
  except OSError, (errno, message):
    if errno != 2:  # ENOENT -- cvs not installed.
      raise

  return (VCS_UNKNOWN, None)
def GuessVCS(options):
  """Helper to guess the version control system.

  This verifies any user-specified VersionControlSystem (by command line
  or environment variable).  If the user didn't specify one, this examines
  the current directory, guesses which VersionControlSystem we're using,
  and returns an instance of the appropriate class.  Exit with an error
  if we can't figure it out.

  Returns:
    A VersionControlSystem instance. Exits if the VCS can't be guessed.
  """
  # Explicit choice: command line first, then the environment.
  vcs = options.vcs or os.environ.get("CODEREVIEW_VCS")
  if vcs:
    normalized = VCS_ABBREVIATIONS.get(vcs.lower())
    if normalized is None:
      ErrorExit("Unknown version control system %r specified." % vcs)
    vcs, extra_output = normalized, None
  else:
    vcs, extra_output = GuessVCSName(options)

  # Mercurial needs the repository root passed to its constructor.
  if vcs == VCS_MERCURIAL:
    if extra_output is None:
      extra_output = RunShell(["hg", "root"]).strip()
    return MercurialVCS(options, extra_output)

  constructors = {
      VCS_SUBVERSION: SubversionVCS,
      VCS_PERFORCE: PerforceVCS,
      VCS_GIT: GitVCS,
      VCS_CVS: CVSVCS,
  }
  if vcs in constructors:
    return constructors[vcs](options)

  ErrorExit(("Could not guess version control system. "
             "Are you in a working copy directory?"))
def CheckReviewer(reviewer):
  """Validate a reviewer -- either a nickname or an email address.

  Args:
    reviewer: A nickname or an email address.

  Calls ErrorExit() if it is an invalid email address.
  """
  if "@" not in reviewer:
    # Anything without an "@" is treated as a nickname and accepted as-is.
    return
  if reviewer.count("@") != 1:
    # More than one "@" can never be a valid address.
    ErrorExit("Invalid email address: %r" % reviewer)
  domain = reviewer.split("@", 1)[1]
  if "." not in domain:
    # Require at least one dot in the domain part.
    ErrorExit("Invalid email address: %r" % reviewer)
def LoadSubversionAutoProperties():
  """Returns the content of [auto-props] section of Subversion's config file as
  a dictionary.

  Returns:
    A dictionary whose key-value pair corresponds the [auto-props] section's
    key-value pair.
    In following cases, returns empty dictionary:
      - config file doesn't exist, or
      - 'enable-auto-props' is not set to 'true-like-value' in [miscellany].
  """
  # Locate the per-user Subversion config for this platform.
  if os.name == 'nt':
    config_path = os.environ.get("APPDATA") + "\\Subversion\\config"
  else:
    config_path = os.path.expanduser("~/.subversion/config")
  if not os.path.exists(config_path):
    return {}

  parser = ConfigParser.ConfigParser()
  parser.read(config_path)
  # Auto-props only apply when explicitly enabled in [miscellany].
  enabled = (parser.has_section("miscellany") and
             parser.has_option("miscellany", "enable-auto-props") and
             parser.getboolean("miscellany", "enable-auto-props") and
             parser.has_section("auto-props"))
  if not enabled:
    return {}
  return dict(
      (pattern, ParseSubversionPropertyValues(parser.get("auto-props",
                                                         pattern)))
      for pattern in parser.options("auto-props"))
def ParseSubversionPropertyValues(props):
  """Parse the given property value which comes from [auto-props] section and
  returns a list whose element is a (svn_prop_key, svn_prop_value) pair.

  See the following doctest for example.

  >>> ParseSubversionPropertyValues('svn:eol-style=LF')
  [('svn:eol-style', 'LF')]
  >>> ParseSubversionPropertyValues('svn:mime-type=image/jpeg')
  [('svn:mime-type', 'image/jpeg')]
  >>> ParseSubversionPropertyValues('svn:eol-style=LF;svn:executable')
  [('svn:eol-style', 'LF'), ('svn:executable', '*')]
  """
  pairs = []
  for spec in props.split(";"):
    parts = spec.split("=")
    assert len(parts) <= 2
    if len(parts) == 2:
      pairs.append((parts[0], parts[1]))
    else:
      # If value is not given, use '*' as a Subversion's convention.
      pairs.append((parts[0], "*"))
  return pairs
def GetSubversionPropertyChanges(filename):
  """Return a Subversion's 'Property changes on ...' string, which is used in
  the patch file.

  Args:
    filename: filename whose property might be set by [auto-props] config.

  Returns:
    A string like 'Property changes on |filename| ...' if given |filename|
    matches any entries in [auto-props] section. None, otherwise.
  """
  global svn_auto_props_map
  # Parse the user's auto-props config lazily, once per process.
  if svn_auto_props_map is None:
    svn_auto_props_map = LoadSubversionAutoProperties()

  matched = []
  for pattern, props in svn_auto_props_map.items():
    if fnmatch.fnmatch(filename, pattern):
      matched.extend(props)
  if not matched:
    return None
  return FormatSubversionPropertyChanges(filename, matched)
def FormatSubversionPropertyChanges(filename, props):
  """Returns Subversion's 'Property changes on ...' strings using given filename
  and properties.

  Args:
    filename: filename
    props: A list whose element is a (svn_prop_key, svn_prop_value) pair.

  Returns:
    A string which can be used in the patch file for Subversion.
  """
  header = [
      "Property changes on: %s" % filename,
      "___________________________________________________________________",
  ]
  body = []
  for key, value in props:
    body.append("Added: " + key)
    body.append(" + " + value)
  return "\n".join(header + body) + "\n"
def RealMain(argv, data=None):
  """The real main function.

  Args:
    argv: Command line arguments.
    data: Diff contents. If None (default) the diff is generated by
      the VersionControlSystem implementation returned by GuessVCS().

  Returns:
    A 2-tuple (issue id, patchset id).
    The patchset id is None if the base files are not uploaded by this
    script (applies only to SVN checkouts).
  """
  logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:"
                              "%(lineno)s %(message)s "))
  # Force a predictable locale so VCS command output parses consistently.
  os.environ['LC_ALL'] = 'C'
  options, args = parser.parse_args(argv[1:])
  global verbosity
  verbosity = options.verbose
  if verbosity >= 3:
    logging.getLogger().setLevel(logging.DEBUG)
  elif verbosity >= 2:
    logging.getLogger().setLevel(logging.INFO)

  vcs = GuessVCS(options)

  base = options.base_url
  if isinstance(vcs, SubversionVCS):
    # Guessing the base field is only supported for Subversion.
    # Note: Fetching base files may become deprecated in future releases.
    guessed_base = vcs.GuessBase(options.download_base)
    if base:
      if guessed_base and base != guessed_base:
        print "Using base URL \"%s\" from --base_url instead of \"%s\"" % \
            (base, guessed_base)
    else:
      base = guessed_base

  if not base and options.download_base:
    # NOTE(review): download_base is already truthy on this branch, so this
    # assignment only normalizes it to True; kept as-is for fidelity.
    options.download_base = True
    logging.info("Enabled upload of base file")

  if not options.assume_yes:
    vcs.CheckForUnknownFiles()

  if data is None:
    data = vcs.GenerateDiff(args)
  data = vcs.PostProcessDiff(data)
  if options.print_diffs:
    print "Rietveld diff start:*****"
    print data
    print "Rietveld diff end:*****"

  files = vcs.GetBaseFiles(data)
  if verbosity >= 1:
    print "Upload server:", options.server, "(change with -s/--server)"
  if options.issue:
    prompt = "Message describing this patch set: "
  else:
    prompt = "New issue subject: "
  message = options.message or raw_input(prompt).strip()
  if not message:
    ErrorExit("A non-empty message is required")
  rpc_server = GetRpcServer(options.server,
                            options.email,
                            options.host,
                            options.save_cookies,
                            options.account_type)
  form_fields = [("subject", message)]
  if base:
    # Strip any user credentials embedded in the base URL before sending.
    b = urlparse.urlparse(base)
    username, netloc = urllib.splituser(b.netloc)
    if username:
      logging.info("Removed username from base URL")
    base = urlparse.urlunparse((b.scheme, netloc, b.path, b.params,
                                b.query, b.fragment))
    form_fields.append(("base", base))
  if options.issue:
    form_fields.append(("issue", str(options.issue)))
  if options.email:
    form_fields.append(("user", options.email))
  if options.reviewers:
    for reviewer in options.reviewers.split(','):
      CheckReviewer(reviewer)
    form_fields.append(("reviewers", options.reviewers))
  if options.cc:
    for cc in options.cc.split(','):
      CheckReviewer(cc)
    form_fields.append(("cc", options.cc))
  description = options.description
  if options.description_file:
    if options.description:
      ErrorExit("Can't specify description and description_file")
    # NOTE(review): "file" shadows the builtin; kept for fidelity.
    file = open(options.description_file, 'r')
    description = file.read()
    file.close()
  if description:
    form_fields.append(("description", description))
  # Send a hash of all the base file so the server can determine if a copy
  # already exists in an earlier patchset.
  base_hashes = ""
  for file, info in files.iteritems():
    if not info[0] is None:
      checksum = md5(info[0]).hexdigest()
      if base_hashes:
        base_hashes += "|"
      base_hashes += checksum + ":" + file
  form_fields.append(("base_hashes", base_hashes))
  if options.private:
    if options.issue:
      print "Warning: Private flag ignored when updating an existing issue."
    else:
      form_fields.append(("private", "1"))
  # If we're uploading base files, don't send the email before the uploads, so
  # that it contains the file status.
  if options.send_mail and options.download_base:
    form_fields.append(("send_mail", "1"))
  if not options.download_base:
    form_fields.append(("content_upload", "1"))
  if len(data) > MAX_UPLOAD_SIZE:
    # Oversized diffs go up one patch at a time via UploadSeparatePatches.
    print "Patch is large, so uploading file patches separately."
    uploaded_diff_file = []
    form_fields.append(("separate_patches", "1"))
  else:
    uploaded_diff_file = [("data", "data.diff", data)]
  ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file)
  response_body = rpc_server.Send("/upload", body, content_type=ctype)
  patchset = None
  if not options.download_base or not uploaded_diff_file:
    # Expected reply: status line, patchset id, then "key filename" pairs.
    lines = response_body.splitlines()
    if len(lines) >= 2:
      msg = lines[0]
      patchset = lines[1].strip()
      patches = [x.split(" ", 1) for x in lines[2:]]
    else:
      msg = response_body
  else:
    msg = response_body
  StatusUpdate(msg)
  if not response_body.startswith("Issue created.") and \
     not response_body.startswith("Issue updated."):
    sys.exit(0)
  # The issue id is the last path component of the URL in the status line.
  issue = msg[msg.rfind("/")+1:]

  if not uploaded_diff_file:
    result = UploadSeparatePatches(issue, rpc_server, patchset, data, options)
    if not options.download_base:
      patches = result

  if not options.download_base:
    vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files)
  if options.send_mail:
    rpc_server.Send("/" + issue + "/mail", payload="")
  return issue, patchset
def main():
  """Command-line entry point: run RealMain, exiting cleanly on Ctrl-C."""
  try:
    RealMain(sys.argv)
  except KeyboardInterrupt:
    print
    StatusUpdate("Interrupted.")
    sys.exit(1)


if __name__ == "__main__":
  main()
| Python |
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for Google API Python client.
Also installs included versions of third party libraries, if those libraries
are not already installed.
"""
from setuptools import setup
packages = [
'apiclient',
'oauth2client',
'apiclient.ext',
'apiclient.contrib',
'apiclient.contrib.latitude',
'apiclient.contrib.moderator',
'uritemplate',
]
install_requires = [
'httplib2>=0.7.4',
'python-gflags',
]
try:
import json
needs_json = False
except ImportError:
needs_json = True
if needs_json:
install_requires.append('simplejson')
long_desc = """The Google API Client for Python is a client library for
accessing the Plus, Moderator, and many other Google APIs."""
import apiclient
version = apiclient.__version__
setup(name="google-api-python-client",
version=version,
description="Google API Client Library for Python",
long_description=long_desc,
author="Joe Gregorio",
author_email="jcgregorio@google.com",
url="http://code.google.com/p/google-api-python-client/",
install_requires=install_requires,
packages=packages,
package_data={
'apiclient': ['contrib/*/*.json']
},
scripts=['bin/enable-app-engine-project'],
license="Apache 2.0",
keywords="google api client",
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX',
'Topic :: Internet :: WWW/HTTP'])
| Python |
# Early, and incomplete implementation of -04 (the URI Template draft).
#
import re
import urllib

# Reserved URI characters that pass through unescaped under the "+" operator.
RESERVED = ":/?#[]@!$&'()*+,;="
# The set of recognized expansion operators.
OPERATOR = "+./;?|!@"
# Explode modifiers that may follow a variable name.
EXPLODE = "*+"
# Partial (prefix/suffix) modifiers.
MODIFIER = ":^"
# Matches an entire template expression: optional operator, then a varlist.
TEMPLATE = re.compile(r"{(?P<operator>[\+\./;\?|!@])?(?P<varlist>[^}]+)}", re.UNICODE)
# Matches one varspec within a varlist: name, optional explode or partial
# modifier, optional "=default".
VAR = re.compile(r"^(?P<varname>[^=\+\*:\^]+)((?P<explode>[\+\*])|(?P<partial>[:\^]-?[0-9]+))?(=(?P<default>.*))?$", re.UNICODE)
def _tostring(varname, value, explode, operator, safe=""):
  """Render one variable using plain (simple string) expansion."""
  quote = urllib.quote
  if type(value) == type([]):
    # Lists join with ","; the "+" explode prefixes each item with the name.
    if explode == "+":
      items = [varname + "." + quote(item, safe) for item in value]
    else:
      items = [quote(item, safe) for item in value]
    return ",".join(items)
  if type(value) == type({}):
    # Dicts emit key,value pairs in sorted key order.
    pairs = []
    for key in sorted(value):
      if explode == "+":
        pairs.append(varname + "." + quote(key, safe) + "," +
                     quote(value[key], safe))
      else:
        pairs.append(quote(key, safe) + "," + quote(value[key], safe))
    return ",".join(pairs)
  return urllib.quote(value, safe)
def _tostring_path(varname, value, explode, operator, safe=""):
  """Render one variable for path-style operators ("/" and ".").

  The operator character itself is used as the joiner for exploded values;
  non-exploded composites fall back to ",".
  """
  joiner = operator
  if type(value) == type([]):
    if explode == "+":
      return joiner.join([varname + "." + urllib.quote(x, safe) for x in value])
    elif explode == "*":
      return joiner.join([urllib.quote(x, safe) for x in value])
    else:
      return ",".join([urllib.quote(x, safe) for x in value])
  elif type(value) == type({}):
    # Keys are emitted in sorted order for deterministic output.
    keys = value.keys()
    keys.sort()
    if explode == "+":
      return joiner.join([varname + "." + urllib.quote(key, safe) + joiner +
                          urllib.quote(value[key], safe) for key in keys])
    elif explode == "*":
      return joiner.join([urllib.quote(key, safe) + joiner +
                          urllib.quote(value[key], safe) for key in keys])
    else:
      return ",".join([urllib.quote(key, safe) + "," +
                       urllib.quote(value[key], safe) for key in keys])
  else:
    if value:
      return urllib.quote(value, safe)
    else:
      # Empty scalars contribute nothing to a path segment.
      return ""
def _tostring_query(varname, value, explode, operator, safe=""):
  """Render one variable for query-style operators (";" and "?").

  Under "?" values join with "&" and scalars are prefixed "name=";
  under ";" the operator itself joins the parts.
  """
  joiner = operator
  varprefix = ""
  if operator == "?":
    joiner = "&"
    varprefix = varname + "="
  if type(value) == type([]):
    if 0 == len(value):
      # Empty lists contribute nothing.
      return ""
    if explode == "+":
      return joiner.join([varname + "=" + urllib.quote(x, safe) for x in value])
    elif explode == "*":
      return joiner.join([urllib.quote(x, safe) for x in value])
    else:
      return varprefix + ",".join([urllib.quote(x, safe) for x in value])
  elif type(value) == type({}):
    if 0 == len(value):
      # Empty dicts contribute nothing.
      return ""
    # Keys are emitted in sorted order for deterministic output.
    keys = value.keys()
    keys.sort()
    if explode == "+":
      return joiner.join([varname + "." + urllib.quote(key, safe) + "=" +
                          urllib.quote(value[key], safe) for key in keys])
    elif explode == "*":
      return joiner.join([urllib.quote(key, safe) + "=" +
                          urllib.quote(value[key], safe) for key in keys])
    else:
      return varprefix + ",".join([urllib.quote(key, safe) + "," +
                                   urllib.quote(value[key], safe)
                                   for key in keys])
  else:
    if value:
      return varname + "=" + urllib.quote(value, safe)
    else:
      # A present-but-empty scalar yields just the bare name.
      return varname
# Maps each expansion operator to the renderer for a single variable under
# that operator; "" is the default (no operator) form.
TOSTRING = {
    "" : _tostring,
    "+": _tostring,
    ";": _tostring_query,
    "?": _tostring_query,
    "/": _tostring_path,
    ".": _tostring_path,
}
def expand(template, vars):
    """Expand a URI Template with the given variables.

    Args:
        template: string, a URI Template.
        vars: dict, mapping of variable names to values. (NOTE(review): the
            parameter shadows the `vars` builtin; kept for API compatibility.)

    Returns:
        string, the template with all {...} expressions expanded.
    """
    def _sub(match):
        # Invoked by TEMPLATE.sub once per {...} expression in the template.
        groupdict = match.groupdict()
        operator = groupdict.get('operator')
        if operator is None:
            operator = ''
        varlist = groupdict.get('varlist')
        safe = "@"
        if operator == '+':
            # Reserved expansion: leave RESERVED characters unencoded.
            safe = RESERVED
        varspecs = varlist.split(",")
        varnames = []
        defaults = {}
        for varspec in varspecs:
            # Each varspec may have explode/partial modifiers and a default.
            m = VAR.search(varspec)
            groupdict = m.groupdict()
            varname = groupdict.get('varname')
            explode = groupdict.get('explode')
            partial = groupdict.get('partial')
            default = groupdict.get('default')
            if default:
                defaults[varname] = default
            varnames.append((varname, explode, partial))
        retval = []
        # The operator determines the prefix of the whole expansion and the
        # string used to join the per-variable expansions.
        joiner = operator
        prefix = operator
        if operator == "+":
            prefix = ""
            joiner = ","
        if operator == "?":
            joiner = "&"
        if operator == "":
            joiner = ","
        for varname, explode, partial in varnames:
            if varname in vars:
                value = vars[varname]
                # Empty (but not empty-string) values fall back to the
                # declared default, when one exists.
                #if not value and (type(value) == type({}) or type(value) == type([])) and varname in defaults:
                if not value and value != "" and varname in defaults:
                    value = defaults[varname]
            elif varname in defaults:
                value = defaults[varname]
            else:
                # Undefined variable with no default contributes nothing.
                continue
            retval.append(TOSTRING[operator](varname, value, explode, operator, safe=safe))
        # Only emit the operator prefix when at least one variable expanded
        # to a non-empty string.
        if "".join(retval):
            return prefix + joiner.join(retval)
        else:
            return ""
    return TEMPLATE.sub(_sub, template)
| Python |
#!/usr/bin/env python
import glob
import imp
import logging
import os
import sys
import unittest
from trace import fullmodname
# Suppress everything below CRITICAL while the tests run.
logging.basicConfig(level=logging.CRITICAL)

# Location of the App Engine SDK, relative to this script's working dir.
APP_ENGINE_PATH='../google_appengine'

# Conditional import of cleanup function
try:
    from tests.utils import cleanup
except:
    # Fall back to a no-op when the test utilities are unavailable.
    def cleanup():
        pass

# Ensure current working directory is in path
sys.path.insert(0, os.getcwd())
sys.path.insert(0, APP_ENGINE_PATH)

from google.appengine.dist import use_library
# The tests depend on Django 1.2 as provided by the App Engine SDK.
use_library('django', '1.2')
def main():
    """Run the unit tests in each file named on the command line.

    Each argument is loaded as a module and its TestCases are run with a
    text runner. Exits with status 1 if any test fails or errors, so
    shell callers (e.g. CI) can detect failure -- the original discarded
    the result and always exited 0.
    """
    ok = True
    for path in sys.argv[1:]:
        # Load the file as a throwaway module for unittest discovery.
        module = imp.load_source('test', path)
        suite = unittest.TestLoader().loadTestsFromModule(module)
        result = unittest.TextTestRunner(verbosity=1).run(suite)
        if not result.wasSuccessful():
            ok = False
    if not ok:
        sys.exit(1)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.4
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate command-line samples from stubs.
Generates a command-line client sample application from a set of files
that contain only the relevant portions that change between each API.
This allows all the common code to go into a template.
Usage:
python sample_generator.py
Must be run from the root of the respository directory.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os.path
import glob
import sys
import pprint
import string
import textwrap
if not os.path.isdir('samples/src'):
sys.exit('Must be run from root of the respository directory.')
f = open('samples/src/template.tmpl', 'r')
template = string.Template(f.read())
f.close()
for filename in glob.glob('samples/src/*.py'):
# Create a dictionary from the config file to later use in filling in the
# templates.
f = open(filename, 'r')
contents = f.read()
f.close()
config, content = contents.split('\n\n', 1)
variables = {}
for line in config.split('\n'):
key, value = line[1:].split(':', 1)
variables[key.strip()] = value.strip()
lines = content.split('\n')
outlines = []
for l in lines:
if l:
outlines.append(' ' + l)
else:
outlines.append('')
content = '\n'.join(outlines)
variables['description'] = textwrap.fill(variables['description'])
variables['content'] = content
variables['name'] = os.path.basename(filename).split('.', 1)[0]
f = open(os.path.join('samples', variables['name'], variables['name'] + '.py'), 'w')
f.write(template.substitute(variables))
f.close()
print 'Processed: %s' % variables['name']
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import os
import pydoc
import re
import sys
import httplib2
from oauth2client.anyjson import simplejson
from apiclient.discovery import build
# Output directory for the generated pydoc HTML pages.
BASE = 'docs/dyn'
def document(resource, path):
print path
collections = []
for name in dir(resource):
if not "_" in name and callable(getattr(resource, name)) and hasattr(
getattr(resource, name), '__is_resource__'):
collections.append(name)
obj, name = pydoc.resolve(type(resource))
page = pydoc.html.page(
pydoc.describe(obj), pydoc.html.document(obj, name))
for name in collections:
page = re.sub('strong>(%s)<' % name, r'strong><a href="%s">\1</a><' % (path + name + ".html"), page)
for name in collections:
document(getattr(resource, name)(), path + name + ".")
f = open(os.path.join(BASE, path + 'html'), 'w')
f.write(page)
f.close()
def document_api(name, version):
    """Build the named API service and document its entire resource tree.

    Args:
        name: string, API name, e.g. 'calendar'.
        version: string, API version, e.g. 'v3'.
    """
    root_path = '%s.%s.' % (name, version)
    document(build(name, version), root_path)
if __name__ == '__main__':
    http = httplib2.Http()
    # Fetch the directory of all preferred (latest-version) APIs.
    resp, content = http.request('https://www.googleapis.com/discovery/v0.3/directory?preferred=true')
    if resp.status == 200:
        directory = simplejson.loads(content)['items']
        # Generate documentation for every API in the directory.
        for api in directory:
            document_api(api['name'], api['version'])
    else:
        sys.exit("Failed to load the discovery document.")
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build wiki page with a list of all samples.
The information for the wiki page is built from data found in all the README
files in the samples. The format of the README file is:
Description is everything up to the first blank line.
api: plus (Used to look up the long name in discovery).
keywords: appengine (such as appengine, oauth2, cmdline)
The rest of the file is ignored when it comes to building the index.
"""
import httplib2
import itertools
import json
import os
import re
# Discovery responses are cached on disk under '.cache'.
http = httplib2.Http('.cache')
r, c = http.request('https://www.googleapis.com/discovery/v1/apis')
if r.status != 200:
    raise ValueError('Received non-200 response when retrieving Discovery document.')

# Dictionary mapping api names to their discovery description.
DIRECTORY = {}
for item in json.loads(c)['items']:
    # Only index the preferred (latest) version of each API.
    if item['preferred']:
        DIRECTORY[item['name']] = item

# A list of valid keywords. Should not be taken as complete, add to
# this list as needed. Maps keyword -> human-readable section title.
KEYWORDS = {
    'appengine': 'Google App Engine',
    'oauth2': 'OAuth 2.0',
    'cmdline': 'Command-line',
    'django': 'Django',
    'threading': 'Threading',
    'pagination': 'Pagination'
}
def get_lines(name, lines):
    """Return values from lines that begin with name.

    Lines are expected to look like:

        name: space separated values

    Args:
        name: string, parameter name.
        lines: iterable of string, lines in the file.

    Returns:
        List of values in the lines that match.
    """
    prefix = name + ':'
    retval = []
    # A generator expression replaces the original itertools.ifilter,
    # which does not exist in Python 3; behavior is identical.
    for line in (l for l in lines if l.startswith(prefix)):
        retval.extend(line[len(prefix):].split())
    return retval
def wiki_escape(s):
"""Detect WikiSyntax (i.e. InterCaps, a.k.a. CamelCase) and escape it."""
ret = []
for word in s.split():
if re.match(r'[A-Z]+[a-z]+[A-Z]', word):
word = '!%s' % word
ret.append(word)
return ' '.join(ret)
def context_from_sample(api, keywords, dirname, desc):
    """Return info for expanding a sample into a template.

    Args:
        api: string, name of api, or None for non-API-specific samples.
        keywords: list of string, list of keywords for the given api.
        dirname: string, directory name of the sample.
        desc: string, long description of the sample.

    Returns:
        A dictionary of values useful for template expansion, or None when
        `api` is None.
    """
    if api is None:
        return None
    entry = DIRECTORY[api]
    return {
        'api': api,
        'version': entry['version'],
        'api_name': wiki_escape(entry.get('title', entry.get('description'))),
        'api_desc': wiki_escape(entry['description']),
        'api_icon': entry['icons']['x32'],
        'keywords': keywords,
        'dir': dirname,
        # '/' is escaped so the value can be embedded in a browse URL.
        'dir_escaped': dirname.replace('/', '%2F'),
        'desc': wiki_escape(desc),
    }
def keyword_context_from_sample(keywords, dirname, desc):
    """Return template-expansion info for a sample (not API-specific).

    Args:
        keywords: list of string, list of keywords for the given api.
        dirname: string, directory name of the sample.
        desc: string, long description of the sample.

    Returns:
        A dictionary of values useful for template expansion.
    """
    return {
        'keywords': keywords,
        'dir': dirname,
        # '/' is escaped so the value can be embedded in a browse URL.
        'dir_escaped': dirname.replace('/', '%2F'),
        'desc': wiki_escape(desc),
    }
def scan_readme_files(dirname):
    """Scans all subdirs of dirname for README files.

    Args:
        dirname: string, name of directory to walk.

    Returns:
        (samples, keyword_set): list of information about all samples, the
        union of all keywords found.
    """
    samples = []
    keyword_set = set()
    for root, dirs, files in os.walk(dirname):
        if 'README' not in files:
            continue
        filename = os.path.join(root, 'README')
        with open(filename, 'r') as f:
            lines = f.read().splitlines()
        # The description is every line up to the first blank one.
        desc = ' '.join(itertools.takewhile(lambda x: x, lines))
        api = get_lines('api', lines)
        keywords = get_lines('keywords', lines)
        for k in keywords:
            if k not in KEYWORDS:
                raise ValueError(
                    '%s is not a valid keyword in file %s' % (k, filename))
        keyword_set.update(keywords)
        if not api:
            api = [None]
        # root[1:] drops the leading '.' of the './samples/...' path.
        samples.append((api[0], keywords, root[1:], desc))
    samples.sort()
    return samples, keyword_set
def main():
    """Build and print the wiki page indexing every sample."""
    # Get all the information we need out of the README files in the samples.
    samples, keyword_set = scan_readme_files('./samples')
    # Now build a wiki page with all that information. Accumulate all the
    # information as string to be concatenated when were done.
    page = ['<wiki:toc max_depth="3" />\n= Samples By API =\n']
    # All the samples, grouped by API. `samples` is sorted, so one heading
    # per API is emitted the first time that API appears.
    current_api = None
    for api, keywords, dirname, desc in samples:
        context = context_from_sample(api, keywords, dirname, desc)
        if context is None:
            continue
        if current_api != api:
            page.append("""
=== %(api_icon)s %(api_name)s ===
%(api_desc)s
Documentation for the %(api_name)s in [http://api-python-client-doc.appspot.com/%(api)s/%(version)s PyDoc]
""" % context)
            current_api = api
        page.append('|| [http://code.google.com/p/google-api-python-client/source/browse/#hg%(dir_escaped)s %(dir)s] || %(desc)s ||\n' % context)
    # Now group the samples by keywords.
    for keyword, keyword_name in KEYWORDS.iteritems():
        if keyword not in keyword_set:
            continue
        page.append('\n= %s Samples =\n\n' % keyword_name)
        page.append('<table border=1 cellspacing=0 cellpadding=8px>\n')
        for _, keywords, dirname, desc in samples:
            context = keyword_context_from_sample(keywords, dirname, desc)
            if keyword not in keywords:
                continue
            page.append("""
<tr>
<td>[http://code.google.com/p/google-api-python-client/source/browse/#hg%(dir_escaped)s %(dir)s] </td>
<td> %(desc)s </td>
</tr>""" % context)
        page.append('</table>\n')
    # Emit the finished page to stdout; the caller redirects it.
    print ''.join(page)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Handle playfoursquare.com requests, for testing.

    Serves previously captured XML responses so clients can be exercised
    without talking to the real service.
    """

    # Request paths mapped to the capture file whose contents answer them.
    # Replaces the original long if/elif chain with a table lookup.
    CAPTURES = {
        '/v1/venue': '../captures/api/v1/venue.xml',
        '/v1/addvenue': '../captures/api/v1/venue.xml',
        '/v1/venues': '../captures/api/v1/venues.xml',
        '/v1/user': '../captures/api/v1/user.xml',
        '/v1/checkcity': '../captures/api/v1/checkcity.xml',
        '/v1/checkins': '../captures/api/v1/checkins.xml',
        '/v1/cities': '../captures/api/v1/cities.xml',
        '/v1/switchcity': '../captures/api/v1/switchcity.xml',
        '/v1/tips': '../captures/api/v1/tips.xml',
        '/v1/checkin': '../captures/api/v1/checkin.xml',
        '/history/12345.rss': '../captures/api/v1/feed.xml',
    }

    def do_GET(self):
        """Serve a request by streaming the matching capture file."""
        logging.warn('do_GET: %s, %s', self.command, self.path)
        url = urlparse.urlparse(self.path)
        logging.warn('do_GET: %s', url)
        # NOTE: the original also parsed the query string into an unused
        # (and buggy) local; the query plays no part in dispatch.
        response = self.handle_url(url)
        if response is not None:
            self.send_200()
            shutil.copyfileobj(response, self.wfile)
            self.wfile.close()

    # POST requests are handled exactly like GETs.
    do_POST = do_GET

    def handle_url(self, url):
        """Look up the capture file for `url`.

        Args:
            url: ParseResult for the requested path.

        Returns:
            An open file object with the canned response, or None after
            sending a 404 when the path is unknown.
        """
        path = self.CAPTURES.get(url.path)
        if path is None:
            self.send_error(404)
            return None
        logging.warn('Using: %s' % path)
        return open(path)

    def send_200(self):
        """Send a 200 response with an XML content type."""
        self.send_response(200)
        self.send_header('Content-type', 'text/xml')
        self.end_headers()
def main():
    """Start the capture-serving HTTP server.

    An optional first command-line argument overrides the default
    port (8080). Binds all interfaces and serves until interrupted.
    """
    if len(sys.argv) > 1:
        port = int(sys.argv[1])
    else:
        port = 8080
    server_address = ('0.0.0.0', port)
    httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
    sa = httpd.socket.getsockname()
    print "Serving HTTP on", sa[0], "port", sa[1], "..."
    httpd.serve_forever()
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
"""
Pull a oAuth protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# host:port used for every API connection.
SERVER = 'api.foursquare.com:80'
# All requests are form-encoded POSTs.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
# Every request is signed with HMAC-SHA1.
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
# Endpoint that exchanges a username/password for an OAuth token.
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
    """Extract the OAuth credentials from an authexchange XML response.

    Args:
        auth_response: string, raw XML returned by the authexchange call.

    Returns:
        (token, secret) tuple of strings.

    Raises:
        AttributeError: if either element is missing from the response.
    """
    token = re.search('<oauth_token>(.*)</oauth_token>',
                      auth_response).group(1)
    secret = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
                       auth_response).group(1)
    return (token, secret)
def create_signed_oauth_request(username, password, consumer):
    """Build an HMAC-SHA1-signed authexchange request.

    Args:
        username: string, foursquare account name.
        password: string, foursquare account password.
        consumer: oauth.OAuthConsumer for this application.

    Returns:
        A signed oauth.OAuthRequest ready to POST to AUTHEXCHANGE_URL.
    """
    request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
        parameters={'fs_username': username, 'fs_password': password})
    # No token yet -- the point of this request is to obtain one.
    request.sign_request(SIGNATURE_METHOD, consumer, None)
    return request
def main():
    """Fetch the OAuth-protected URL given as argv[1] and print the body.

    Credentials are read from ~/.oget (one value per line). On the first
    run (4 lines present) an authexchange is performed and the obtained
    token/secret are written back; later runs (6 lines) reuse them.
    """
    url = urlparse.urlparse(sys.argv[1])
    # Nevermind that the query can have repeated keys.
    parameters = dict(urlparse.parse_qsl(url.query))
    password_file = open(os.path.join(user.home, '.oget'))
    lines = [line.strip() for line in password_file.readlines()]
    if len(lines) == 4:
        # No cached token yet; authexchange happens below.
        cons_key, cons_key_secret, username, password = lines
        access_token = None
    else:
        cons_key, cons_key_secret, username, password, token, secret = lines
        access_token = oauth.OAuthToken(token, secret)
    consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
    if not access_token:
        # Exchange username/password for an access token, then cache it
        # by rewriting ~/.oget with the token and secret appended.
        oauth_request = create_signed_oauth_request(username, password, consumer)
        connection = httplib.HTTPConnection(SERVER)
        headers = {'Content-Type' :'application/x-www-form-urlencoded'}
        connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
            body=oauth_request.to_postdata(), headers=headers)
        auth_response = connection.getresponse().read()
        token = parse_auth_response(auth_response)
        access_token = oauth.OAuthToken(*token)
        open(os.path.join(user.home, '.oget'), 'w').write('\n'.join((
            cons_key, cons_key_secret, username, password, token[0], token[1])))
    # Sign and send the actual request against the requested URL.
    oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
        access_token, http_method='POST', http_url=url.geturl(),
        parameters=parameters)
    oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
    connection = httplib.HTTPConnection(SERVER)
    connection.request(oauth_request.http_method, oauth_request.to_url(),
        body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
    print connection.getresponse().read()
    #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Type markers assigned to attributes while walking a capture file.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"
# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']
# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}
DEFAULT_CLASS_IMPORTS = [
]
CLASS_IMPORTS = {
    # 'Checkin': DEFAULT_CLASS_IMPORTS + [
    # 'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Venue': DEFAULT_CLASS_IMPORTS + [
    # 'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
    # 'Tip': DEFAULT_CLASS_IMPORTS + [
    # 'import com.joelapenna.foursquare.filters.VenueFilterable'
    # ],
}
# Element types generated as their own Java classes; when one of these is
# nested inside another element its subtree is skipped during the walk.
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]
# All recognized type names; anything else is treated as a String.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
    """Parse the xml file getting all attributes.

    <venue>
        <attribute>value</attribute>
    </venue>

    Args:
        path: string, filesystem path of the capture XML file.

    Returns:
        type_name - The java-style name the top node will have. "Venue"
        top_node_name - unadultured name of the xml stanza, probably the type of
            java class we're creating. "venue"
        attributes - {'attribute': 'value'}
    """
    doc = pulldom.parse(path)
    type_name = None
    top_node_name = None
    attributes = {}
    # Depth counter; while > 0 we are inside a complex element whose
    # subtree must be skipped rather than flattened into attributes.
    level = 0
    for event, node in doc:
        # For skipping parts of a tree.
        if level > 0:
            if event == pulldom.END_ELEMENT:
                level-=1
                logging.warn('(%s) Skip end: %s' % (str(level), node))
                continue
            elif event == pulldom.START_ELEMENT:
                logging.warn('(%s) Skipping: %s' % (str(level), node))
                level+=1
                continue
        if event == pulldom.START_ELEMENT:
            logging.warn('Parsing: ' + node.tagName)
            # Get the type name to use. The first element seen is the top
            # node; its tag becomes the generated class name ('venue_x' ->
            # 'VenueX').
            if type_name is None:
                type_name = ''.join([word.capitalize()
                                     for word in node.tagName.split('_')])
                top_node_name = node.tagName
                logging.warn('Found Top Node Name: ' + top_node_name)
                continue
            typ = node.getAttribute('type')
            child = node.getAttribute('child')
            # We don't want to walk complex types.
            if typ in COMPLEX:
                logging.warn('Found Complex: ' + node.tagName)
                level = 1
            elif typ not in TYPES:
                # Unrecognized types default to String.
                logging.warn('Found String: ' + typ)
                typ = STRING
            else:
                logging.warn('Found Type: ' + typ)
            logging.warn('Adding: ' + str((node, typ)))
            # First occurrence wins; repeated tags are not overwritten.
            attributes.setdefault(node.tagName, (typ, [child]))
    logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
    return type_name, top_node_name, attributes
| Python |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
    """Generate a parser class for the capture file named in argv[1]."""
    capture_path = sys.argv[1]
    type_name, top_node_name, attributes = common.WalkNodesForAttributes(
        capture_path)
    GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
per common.WalkNodsForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    """Build the template-substitution dict for one attribute.

    Args:
        top_node_name: string, xml name of the enclosing element.
        name: string, xml name of the attribute.
        typ: string, detected type of the attribute.
        children: list whose first entry is the child type name (or None)
            for Group attributes.

    Returns:
        dict of values consumed by the stanza and class templates.
    """
    # CameCaseClassName
    type_name = ''.join(word.capitalize() for word in top_node_name.split('_'))
    # CamelCaseClassName
    camel_name = ''.join(word.capitalize() for word in name.split('_'))
    # camelCaseLocalName
    attribute_name = camel_name.lower().capitalize()
    # mFieldName
    field_name = 'm' + camel_name
    sub_type = children[0]
    if sub_type:
        sub_parser_camel_case = sub_type + 'Parser'
    else:
        # Heuristic: strip the plural 's' to guess the element parser name.
        sub_parser_camel_case = camel_name[:-1] + 'Parser'
    return {
        'type_name': type_name,
        'name': name,
        'top_node_name': top_node_name,
        'camel_name': camel_name,
        'parser_name': typ + 'Parser',
        'attribute_name': attribute_name,
        'field_name': field_name,
        'typ': typ,
        'timestamp': datetime.datetime.now(),
        'sub_parser_camel_case': sub_parser_camel_case,
        'sub_type': sub_type
    }
# Run only when invoked as a script, not when imported.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
import os
import subprocess
import sys
# Output root for generated Java sources, and input root for captures.
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
# Capture files may be listed on the command line; default to all of them.
captures = sys.argv[1:]
if not captures:
    captures = os.listdir(TYPESDIR)
for f in captures:
    # 'venue_type.xml' -> 'venue_type' -> Java class name 'VenueType'.
    basename = f.split('.')[0]
    javaname = ''.join([c.capitalize() for c in basename.split('_')])
    fullpath = os.path.join(TYPESDIR, f)
    typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
    parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
    # shell=True is required for the '>' output redirection.
    cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
    cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
    print cmd
    subprocess.call(cmd, stdout=sys.stdout, shell=True)
| Python |
#! /usr/bin/env python
import sys, os
import tarfile, zipfile, gzip, bz2
from optparse import OptionParser
"""
Builds packaged releases of DebugKit so I don't have to do things manually.
Excludes itself (build.py), .gitignore, .DS_Store and the .git folder from the archives.
"""
def main():
parser = OptionParser();
parser.add_option('-o', '--output-dir', dest="output_dir",
help="write the packages to DIR", metavar="DIR")
parser.add_option('-p', '--prefix-name', dest="prefix",
help="prefix used for the generated files")
parser.add_option('-k', '--skip', dest="skip", default="",
help="A comma separated list of files to skip")
parser.add_option('-s', '--source-dir', dest="source", default=".",
help="The source directory for the build process")
(options, args) = parser.parse_args()
if options.output_dir == '' or options.output_dir == options.source:
print 'Requires an output dir, and that output dir cannot be the same as the source one!'
exit()
# append .git and build.py to the skip files
skip = options.skip.split(',')
skip.extend(['.git', '.gitignore', '.DS_Store', 'build.py'])
# get list of files in top level dir.
files = os.listdir(options.source)
os.chdir(options.source)
# filter the files, I couldn't figure out how to do it in a more concise way.
for f in files[:]:
try:
skip.index(f)
files.remove(f)
except ValueError:
pass
# make a boring tar file
destfile = ''.join([options.output_dir, options.prefix])
tar_file_name = destfile + '.tar'
tar = tarfile.open(tar_file_name, 'w');
for f in files:
tar.add(f)
tar.close()
print "Generated tar file"
# make the gzip
if make_gzip(tar_file_name, destfile):
print "Generated gzip file"
else:
print "Could not generate gzip file"
# make the bz2
if make_bz2(tar_file_name, destfile):
print "Generated bz2 file"
else:
print "Could not generate bz2 file"
# make the zip file
zip_recursive(destfile + '.zip', options.source, files)
print "Generated zip file\n"
def make_gzip(tar_file, destination):
    """
    Takes a tar_file and destination. Compressess the tar file and creates
    a .tar.gzip

    try/finally guarantees both handles are closed even when compression
    raises (the original leaked them on error). Returns True on success.
    """
    tar_contents = open(tar_file, 'rb')
    try:
        gzipfile = gzip.open(destination + '.tar.gz', 'wb')
        try:
            gzipfile.writelines(tar_contents)
        finally:
            gzipfile.close()
    finally:
        tar_contents.close()
    return True
def make_bz2(tar_file, destination):
    """
    Takes a tar_file and destination. Compressess the tar file and creates
    a .tar.bz2

    try/finally guarantees both handles are closed even when compression
    raises (the original leaked them on error). Returns True on success.
    """
    tar_contents = open(tar_file, 'rb')
    try:
        bz2file = bz2.BZ2File(destination + '.tar.bz2', 'wb')
        try:
            bz2file.writelines(tar_contents)
        finally:
            bz2file.close()
    finally:
        tar_contents.close()
    return True
def zip_recursive(destination, source_dir, rootfiles):
    """
    Recursively zips source_dir into destination.

    rootfiles should contain a list of files in the top level directory that
    are to be included. Any top level files not in rootfiles will be omitted
    from the zip file.

    Returns the destination path.
    """
    zipped = zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED)
    try:
        for root, dirs, files in os.walk(source_dir):
            in_root = (root == source_dir)
            if in_root:
                # Prune unwanted top-level directories in place so os.walk
                # never descends into them. The original removed entries
                # from `dirs` while iterating it, which skips the element
                # following each removal and could leave excluded
                # directories in the archive.
                dirs[:] = [d for d in dirs if d in rootfiles]
            for f in files:
                # Top-level files must be explicitly listed in rootfiles.
                if in_root and f not in rootfiles:
                    continue
                zipped.write(os.path.join(root, f))
    finally:
        zipped.close()
    return destination
# Run only when invoked as a script, not when imported.
if __name__ == '__main__':
    main()
#! /usr/bin/env python
import sys, os
import tarfile, zipfile, gzip, bz2
from optparse import OptionParser
"""
Builds packaged releases of DebugKit so I don't have to do things manually.
Excludes itself (build.py), .gitignore, .DS_Store and the .git folder from the archives.
"""
def main():
parser = OptionParser();
parser.add_option('-o', '--output-dir', dest="output_dir",
help="write the packages to DIR", metavar="DIR")
parser.add_option('-p', '--prefix-name', dest="prefix",
help="prefix used for the generated files")
parser.add_option('-k', '--skip', dest="skip", default="",
help="A comma separated list of files to skip")
parser.add_option('-s', '--source-dir', dest="source", default=".",
help="The source directory for the build process")
(options, args) = parser.parse_args()
if options.output_dir == '' or options.output_dir == options.source:
print 'Requires an output dir, and that output dir cannot be the same as the source one!'
exit()
# append .git and build.py to the skip files
skip = options.skip.split(',')
skip.extend(['.git', '.gitignore', '.DS_Store', 'build.py'])
# get list of files in top level dir.
files = os.listdir(options.source)
os.chdir(options.source)
# filter the files, I couldn't figure out how to do it in a more concise way.
for f in files[:]:
try:
skip.index(f)
files.remove(f)
except ValueError:
pass
# make a boring tar file
destfile = ''.join([options.output_dir, options.prefix])
tar_file_name = destfile + '.tar'
tar = tarfile.open(tar_file_name, 'w');
for f in files:
tar.add(f)
tar.close()
print "Generated tar file"
# make the gzip
if make_gzip(tar_file_name, destfile):
print "Generated gzip file"
else:
print "Could not generate gzip file"
# make the bz2
if make_bz2(tar_file_name, destfile):
print "Generated bz2 file"
else:
print "Could not generate bz2 file"
# make the zip file
zip_recursive(destfile + '.zip', options.source, files)
print "Generated zip file\n"
def make_gzip(tar_file, destination):
    """
    Takes a tar_file and destination. Compressess the tar file and creates
    a .tar.gzip

    try/finally guarantees both handles are closed even when compression
    raises (the original leaked them on error). Returns True on success.
    """
    tar_contents = open(tar_file, 'rb')
    try:
        gzipfile = gzip.open(destination + '.tar.gz', 'wb')
        try:
            gzipfile.writelines(tar_contents)
        finally:
            gzipfile.close()
    finally:
        tar_contents.close()
    return True
def make_bz2(tar_file, destination):
    """
    Takes a tar_file and destination. Compressess the tar file and creates
    a .tar.bz2

    try/finally guarantees both handles are closed even when compression
    raises (the original leaked them on error). Returns True on success.
    """
    tar_contents = open(tar_file, 'rb')
    try:
        bz2file = bz2.BZ2File(destination + '.tar.bz2', 'wb')
        try:
            bz2file.writelines(tar_contents)
        finally:
            bz2file.close()
    finally:
        tar_contents.close()
    return True
def zip_recursive(destination, source_dir, rootfiles):
    """
    Recursively zips source_dir into destination.

    rootfiles should contain a list of files in the top level directory that
    are to be included. Any top level files not in rootfiles will be omitted
    from the zip file.

    Returns the destination path.
    """
    zipped = zipfile.ZipFile(destination, 'w', zipfile.ZIP_DEFLATED)
    try:
        for root, dirs, files in os.walk(source_dir):
            in_root = (root == source_dir)
            if in_root:
                # Prune unwanted top-level directories in place so os.walk
                # never descends into them. The original removed entries
                # from `dirs` while iterating it, which skips the element
                # following each removal and could leave excluded
                # directories in the archive.
                dirs[:] = [d for d in dirs if d in rootfiles]
            for f in files:
                # Top-level files must be explicitly listed in rootfiles.
                if in_root and f not in rootfiles:
                    continue
                zipped.write(os.path.join(root, f))
    finally:
        zipped.close()
    return destination
# Run only when invoked as a script, not when imported.
if __name__ == '__main__':
    main()
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil
# Directory names to skip entirely while walking the tree. Raw strings,
# an escaped dot for ".svn" and a "$" anchor so that e.g. "binding" is
# no longer accidentally matched by the "bin" alternative.
ignore_pattern = re.compile(r'^(\.svn|target|bin|classes)$')
# Java source files only; the "$" anchor excludes e.g. "Foo.java.orig".
java_pattern = re.compile(r'^.*\.java$')
# Import lines referencing the org.apache.http.annotation package.
annot_pattern = re.compile(r'import org\.apache\.http\.annotation\.')
def process_dir(dir):
    """
    Recursively walk *dir*, rewriting annotation imports in every
    .java file found. Directories matching ignore_pattern are skipped.
    """
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    """
    Rewrite org.apache.http.annotation imports in *filename* to
    net.jcip.annotations, working through a temporary file.

    The original file is replaced only when at least one line changed;
    otherwise the temporary file is discarded.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        dst = os.fdopen(tmpfd, 'w')
        try:
            src = open(filename)
            try:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
            finally:
                src.close()
        finally:
            dst.close()
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except Exception:
        # Clean up the temp file but re-raise: the original bare
        # "except:" silently swallowed every failure, which hid
        # partially processed trees from the operator.
        if os.path.exists(tmpfile):
            os.remove(tmpfile)
        raise
# Script entry point: rewrite annotation imports starting from the
# current working directory.
process_dir('.')
| Python |
"""
Data format classes ("responders") that can be plugged
into model_resource.ModelResource and determine how
the objects of a ModelResource instance are rendered
(e.g. serialized to XML, rendered by templates, ...).
"""
from django.core import serializers
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from django.core.paginator import QuerySetPaginator, InvalidPage
# the correct paginator for Model objects is the QuerySetPaginator,
# not the Paginator! (see Django doc)
from django.core.xheaders import populate_xheaders
from django import forms
from django.http import Http404, HttpResponse
from django.forms.util import ErrorDict
from django.shortcuts import render_to_response
from django.template import loader, RequestContext
from django.utils import simplejson
from django.utils.xmlutils import SimplerXMLGenerator
from django.views.generic.simple import direct_to_template
class SerializeResponder(object):
    """
    Responder for every output format supported by Django's
    serializer framework (xml, python, json, yaml, ...).
    """
    def __init__(self, format, mimetype=None, paginate_by=None, allow_empty=False):
        """
        format:
            any format understood by Django's serializer framework.
            By default: xml, python, json, (yaml).
        mimetype:
            if left at None, HttpResponse falls back to
            settings.DEFAULT_CONTENT_TYPE and settings.DEFAULT_CHARSET
        paginate_by:
            number of elements per page. Default: all elements.
        allow_empty:
            whether an empty first page is acceptable.
        """
        self.format = format
        self.mimetype = mimetype
        self.paginate_by = paginate_by
        self.allow_empty = allow_empty
        # Filled in by Collection.__init__ with the publicly visible fields.
        self.expose_fields = []

    def render(self, object_list):
        """
        Serialize *object_list* to self.format, omitting every model
        field that is not listed in self.expose_fields.
        """
        masked = []
        for instance in list(object_list):
            for field in instance._meta.fields:
                if field.serialize and field.name not in self.expose_fields:
                    # Temporarily switch the field off for serialization.
                    field.serialize = False
                    masked.append(field)
        rendered = serializers.serialize(self.format, object_list)
        # Undo the masking -- field objects are shared model-level state.
        for field in masked:
            field.serialize = True
        return rendered

    def element(self, request, elem):
        """
        Render a single model object to HttpResponse.
        """
        return HttpResponse(self.render([elem]), self.mimetype)

    def error(self, request, status_code, error_dict=None):
        """
        Build a RESTful error response:
        - appropriate status code
        - appropriate mimetype
        - human-readable error message (plus form errors, if any)
        """
        error_dict = error_dict or ErrorDict()
        response = HttpResponse(mimetype=self.mimetype)
        response.write('%d %s' % (status_code, STATUS_CODE_TEXT[status_code]))
        if error_dict:
            response.write('\n\nErrors:\n')
            response.write(error_dict.as_text())
        response.status_code = status_code
        return response

    def list(self, request, queryset, page=None):
        """
        Render a (possibly paginated) list of model objects to
        HttpResponse. Invalid pages yield a 404 error response unless
        allow_empty permits an empty first page.
        """
        if not self.paginate_by:
            return HttpResponse(self.render(list(queryset)), self.mimetype)
        paginator = QuerySetPaginator(queryset, self.paginate_by)
        page = page or request.GET.get('page', 1)
        try:
            page = int(page)
            object_list = paginator.page(page).object_list
        except (InvalidPage, ValueError):
            if page == 1 and self.allow_empty:
                object_list = []
            else:
                return self.error(request, 404)
        return HttpResponse(self.render(object_list), self.mimetype)
class JSONResponder(SerializeResponder):
    """
    JSON data format class.
    """
    def __init__(self, paginate_by=None, allow_empty=False):
        SerializeResponder.__init__(self, 'json', 'application/json',
                                    paginate_by=paginate_by,
                                    allow_empty=allow_empty)

    def error(self, request, status_code, error_dict=None):
        """
        Return a JSON error response holding a human-readable error
        message, application-specific errors and a machine-readable
        status code.
        """
        error_dict = error_dict or ErrorDict()
        response = HttpResponse(mimetype=self.mimetype)
        response.status_code = status_code
        payload = {
            "error-message": '%d %s' % (status_code, STATUS_CODE_TEXT[status_code]),
            "status-code": status_code,
            "model-errors": error_dict.as_ul(),
        }
        # HttpResponse is file-like, so dump straight into it.
        simplejson.dump(payload, response)
        return response
class XMLResponder(SerializeResponder):
    """
    XML data format class.
    """
    def __init__(self, paginate_by=None, allow_empty=False):
        SerializeResponder.__init__(self, 'xml', 'application/xml',
                                    paginate_by=paginate_by,
                                    allow_empty=allow_empty)

    def error(self, request, status_code, error_dict=None):
        """
        Return an XML error response holding a human-readable error
        message, application-specific errors and a machine-readable
        status code.
        """
        from django.conf import settings
        error_dict = error_dict or ErrorDict()
        response = HttpResponse(mimetype=self.mimetype)
        response.status_code = status_code
        writer = SimplerXMLGenerator(response, settings.DEFAULT_CHARSET)
        writer.startDocument()
        writer.startElement("django-error", {})
        writer.addQuickElement(name="error-message",
                               contents='%d %s' % (status_code, STATUS_CODE_TEXT[status_code]))
        writer.addQuickElement(name="status-code", contents=str(status_code))
        if error_dict:
            # One element per field, one per error message.
            writer.startElement("model-errors", {})
            for field_name, field_errors in error_dict.items():
                for message in field_errors:
                    writer.addQuickElement(name=field_name, contents=message)
            writer.endElement("model-errors")
        writer.endElement("django-error")
        writer.endDocument()
        return response
class TemplateResponder(object):
    """
    Data format class that uses templates (similar to Django's
    generic views).
    """
    def __init__(self, template_dir, paginate_by=None, template_loader=loader,
            extra_context=None, allow_empty=False, context_processors=None,
            template_object_name='object', mimetype=None):
        # template_dir: directory prefix for all template lookups.
        # paginate_by: page size; None disables pagination.
        # extra_context: dict merged into every rendered context;
        #     callable values are evaluated once, here at construction time.
        # template_object_name: context variable name for the object(s).
        self.template_dir = template_dir
        self.paginate_by = paginate_by
        self.template_loader = template_loader
        if not extra_context:
            extra_context = {}
        for key, value in extra_context.items():
            if callable(value):
                extra_context[key] = value()
        self.extra_context = extra_context
        self.allow_empty = allow_empty
        self.context_processors = context_processors
        self.template_object_name = template_object_name
        self.mimetype = mimetype
        self.expose_fields = None # Set by Collection.__init__
    def _hide_unexposed_fields(self, obj, allowed_fields):
        """
        Remove fields from a model that should not be public.
        """
        # Pops the attribute off the instance __dict__; a '<name>_id'
        # entry in allowed_fields keeps the corresponding FK field.
        for field in obj._meta.fields:
            if not field.name in allowed_fields and \
                not field.name + '_id' in allowed_fields:
                obj.__dict__.pop(field.name)
    def list(self, request, queryset, page=None):
        """
        Renders a list of model objects to HttpResponse.

        Template name: <template_dir>/<module_name>_list.html.
        Raises Http404 for invalid pages (unless allow_empty permits
        an empty first page) and for empty unpaginated querysets.
        """
        template_name = '%s/%s_list.html' % (self.template_dir, queryset.model._meta.module_name)
        if self.paginate_by:
            paginator = QuerySetPaginator(queryset, self.paginate_by)
            if not page:
                page = request.GET.get('page', 1)
            try:
                page = int(page)
                object_list = paginator.page(page).object_list
            except (InvalidPage, ValueError):
                if page == 1 and self.allow_empty:
                    object_list = []
                else:
                    raise Http404
            # NOTE(review): paginator.page(page) is called a second time
            # here and can itself raise InvalidPage on the empty-first-page
            # path above -- confirm against the targeted Django version.
            current_page = paginator.page(page)
            c = RequestContext(request, {
                '%s_list' % self.template_object_name: object_list,
                'is_paginated': paginator.num_pages > 1,
                'results_per_page': self.paginate_by,
                'has_next': current_page.has_next(),
                'has_previous': current_page.has_previous(),
                'page': page,
                'next': page + 1,
                'previous': page - 1,
                'last_on_page': current_page.end_index(),
                'first_on_page': current_page.start_index(),
                'pages': paginator.num_pages,
                'hits' : paginator.count,
            }, self.context_processors)
        else:
            object_list = queryset
            c = RequestContext(request, {
                '%s_list' % self.template_object_name: object_list,
                'is_paginated': False
            }, self.context_processors)
            if not self.allow_empty and len(queryset) == 0:
                raise Http404
        # Hide unexposed fields
        for obj in object_list:
            self._hide_unexposed_fields(obj, self.expose_fields)
        c.update(self.extra_context)
        t = self.template_loader.get_template(template_name)
        return HttpResponse(t.render(c), mimetype=self.mimetype)
    def element(self, request, elem):
        """
        Renders single model objects to HttpResponse.

        Template name: <template_dir>/<module_name>_detail.html.
        """
        template_name = '%s/%s_detail.html' % (self.template_dir, elem._meta.module_name)
        t = self.template_loader.get_template(template_name)
        c = RequestContext(request, {
            self.template_object_name : elem,
        }, self.context_processors)
        # Hide unexposed fields
        self._hide_unexposed_fields(elem, self.expose_fields)
        c.update(self.extra_context)
        response = HttpResponse(t.render(c), mimetype=self.mimetype)
        # Adds X-Object-Type / X-Object-Id headers for middleware use.
        populate_xheaders(request, response, elem.__class__, getattr(elem, elem._meta.pk.name))
        return response
    def error(self, request, status_code, error_dict=None):
        """
        Renders error template (template name: error status code).
        """
        if not error_dict:
            error_dict = ErrorDict()
        response = direct_to_template(request,
            template = '%s/%s.html' % (self.template_dir, str(status_code)),
            extra_context = { 'errors' : error_dict },
            mimetype = self.mimetype)
        response.status_code = status_code
        return response
    def create_form(self, request, queryset, form_class):
        """
        Render form for creation of new collection entry.
        """
        # NOTE(review): forms.form_for_model was removed in Django 1.0;
        # this targets an older Django -- confirm before upgrading.
        ResourceForm = forms.form_for_model(queryset.model, form=form_class)
        if request.POST:
            form = ResourceForm(request.POST)
        else:
            form = ResourceForm()
        template_name = '%s/%s_form.html' % (self.template_dir, queryset.model._meta.module_name)
        return render_to_response(template_name, {'form':form})
    def update_form(self, request, pk, queryset, form_class):
        """
        Render edit form for single entry.
        """
        # Remove queryset cache by cloning the queryset
        queryset = queryset._clone()
        elem = queryset.get(**{queryset.model._meta.pk.name : pk})
        ResourceForm = forms.form_for_instance(elem, form=form_class)
        if request.PUT:
            form = ResourceForm(request.PUT)
        else:
            form = ResourceForm()
        template_name = '%s/%s_form.html' % (self.template_dir, elem._meta.module_name)
        return render_to_response(template_name,
            {'form':form, 'update':True, self.template_object_name:elem})
| Python |
"""
Generic resource class.
"""
from django.utils.translation import ugettext as _
from authentication import NoAuthentication
from django.core.urlresolvers import reverse as _reverse
from django.http import Http404, HttpResponse, HttpResponseNotAllowed
def load_put_and_files(request):
    """
    Populate request.PUT and request.FILES from request.raw_post_data.

    PUT and POST bodies are encoded identically and differ only in
    REQUEST_METHOD, so Django's POST parsing can be reused by briefly
    pretending the request is a POST.
    """
    if request.method != 'PUT':
        return
    request.method = 'POST'
    request._load_post_and_files()
    request.method = 'PUT'
    request.PUT = request.POST
    # Drop the cached parse result so request.POST stays consistent.
    del request._post
def reverse(viewname, args=(), kwargs=None):
    """
    Return the URL registered for *viewname* with the given parameters.
    If the URL pattern ends with an optional slash ('/?'), collapse it
    to a plain trailing slash.
    """
    url = _reverse(viewname, None, args, kwargs or {})
    if url.endswith('/?'):
        url = url[:-1]
    return url
class HttpMethodNotAllowed(Exception):
    """
    Signals that request.method was not part of
    the list of permitted methods.

    Raised by ResourceBase.dispatch; callers translate it into a
    405 (Method Not Allowed) response.
    """
class ResourceBase(object):
    """
    Common base class for model-based and non-model-based resources.
    """
    def __init__(self, authentication=None, permitted_methods=None):
        """
        authentication:
            object whose is_authenticated(request) decides whether a
            request may proceed (default: allow everything)
        permitted_methods:
            HTTP request methods allowed on this resource,
            e.g. ('GET', 'PUT'); defaults to GET only
        """
        # Access restrictions
        self.authentication = authentication or NoAuthentication()
        methods = permitted_methods or ["GET"]
        self.permitted_methods = [method.upper() for method in methods]

    def dispatch(self, request, target, *args, **kwargs):
        """
        Map the request's HTTP method onto the matching CRUD method of
        *target*. Raises HttpMethodNotAllowed for methods outside
        self.permitted_methods.
        """
        method = request.method.upper()
        if method not in self.permitted_methods:
            raise HttpMethodNotAllowed
        if method == 'GET':
            return target.read(request, *args, **kwargs)
        if method == 'POST':
            return target.create(request, *args, **kwargs)
        if method == 'PUT':
            # PUT bodies are parsed with Django's POST machinery first.
            load_put_and_files(request)
            return target.update(request, *args, **kwargs)
        if method == 'DELETE':
            return target.delete(request, *args, **kwargs)
        # Permitted but unsupported verb: behave like "not found".
        raise Http404

    def get_url(self):
        """
        Return the URL under which this resource is published.
        """
        return reverse(self)

    # Default CRUD implementations. Subclasses override the methods
    # they actually support; anything unimplemented answers 404.
    def create(self, request):
        raise Http404

    def read(self, request):
        raise Http404

    def update(self, request):
        raise Http404

    def delete(self, request):
        raise Http404
class Resource(ResourceBase):
    """
    Generic resource class for resources that are not backed by a
    Django model.
    """
    def __init__(self, authentication=None, permitted_methods=None,
                 mimetype=None):
        """
        authentication:
            object whose is_authenticated(request) decides whether a
            request may proceed
        permitted_methods:
            HTTP request methods allowed on this resource,
            e.g. ('GET', 'PUT')
        mimetype:
            if left at None, HttpResponse falls back to
            settings.DEFAULT_CONTENT_TYPE and settings.DEFAULT_CHARSET
        """
        ResourceBase.__init__(self, authentication, permitted_methods)
        self.mimetype = mimetype

    def __call__(self, request, *args, **kwargs):
        """
        Entry point: verifies authentication, then forwards to the
        CRUD method matching the request's HTTP method. Disallowed
        methods yield a 405 response.
        """
        if not self.authentication.is_authenticated(request):
            # 401 plus the authentication scheme's challenge headers.
            response = HttpResponse(_('Authorization Required'),
                                    mimetype=self.mimetype)
            for header, value in self.authentication.challenge_headers().items():
                response[header] = value
            response.status_code = 401
            return response
        try:
            return self.dispatch(request, self, *args, **kwargs)
        except HttpMethodNotAllowed:
            response = HttpResponseNotAllowed(self.permitted_methods)
            response.mimetype = self.mimetype
            return response
| Python |
"""
Data format classes that can be plugged into
model_resource.ModelResource and determine how submissions
of model data need to look like (e.g. form submission MIME types,
XML, JSON, ...).
"""
from django.core import serializers
from django.forms import model_to_dict
class InvalidFormData(Exception):
    """
    Raised if form data can not be decoded into key-value
    pairs.

    Raised by SerializeReceiver.get_data when the request body fails
    to deserialize or contains more than one object.
    """
class Receiver(object):
    """
    Base class for all "receiver" data format classes.

    Subclasses must implement get_data(self, request, method), which
    extracts the submitted values from the request.
    """
    def get_data(self, request, method):
        """Extract submitted data; must be implemented by subclasses."""
        # NotImplementedError is the idiomatic exception for an abstract
        # method (the original raised a bare Exception). It subclasses
        # Exception, so existing broad handlers still catch it.
        raise NotImplementedError("Receiver subclass needs to implement get_data!")

    def get_post_data(self, request):
        """Return the data submitted via POST."""
        return self.get_data(request, 'POST')

    def get_put_data(self, request):
        """Return the data submitted via PUT."""
        return self.get_data(request, 'PUT')
class FormReceiver(Receiver):
    """
    Receiver with standard Django behaviour: POST and PUT data arrive
    in form-submission format and are read straight off the request.
    """
    def get_data(self, request, method):
        # Returns request.POST or request.PUT depending on *method*.
        return getattr(request, method)
class SerializeReceiver(Receiver):
    """
    Base class for receivers that parse submissions with Django's
    serializer framework (xml, json, ...).
    """
    def __init__(self, format):
        self.format = format

    def get_data(self, request, method):
        """
        Deserialize the raw request body into exactly one model object
        and return its field dictionary. Raises InvalidFormData when
        the body cannot be parsed or does not hold exactly one object.
        """
        try:
            parsed = list(serializers.deserialize(self.format, request.raw_post_data))
        except serializers.base.DeserializationError:
            raise InvalidFormData
        if len(parsed) != 1:
            raise InvalidFormData
        return model_to_dict(parsed[0].object)
class JSONReceiver(SerializeReceiver):
    """
    Data format class for form submission in JSON,
    e.g. for web browsers.
    """
    def __init__(self):
        # Delegate to the base constructor instead of assigning
        # self.format directly, so any future base-class setup runs.
        SerializeReceiver.__init__(self, 'json')
class XMLReceiver(SerializeReceiver):
    """
    Data format class for form submission in XML,
    e.g. for software clients.
    """
    def __init__(self):
        # Delegate to the base constructor instead of assigning
        # self.format directly, so any future base-class setup runs.
        SerializeReceiver.__init__(self, 'xml')
| Python |
"""
Model-bound resource class.
"""
from django import forms
from django.conf.urls.defaults import patterns
from django.http import *
from django.forms import ModelForm, models
from django.forms.util import ErrorDict
from django.utils.functional import curry
from django.utils.translation.trans_null import _
from resource import ResourceBase, load_put_and_files, reverse, HttpMethodNotAllowed
from receiver import FormReceiver
class InvalidModelData(Exception):
    """
    Raised when create/update fails because the submitted PUT/POST
    data does not validate.
    """
    def __init__(self, errors=None):
        # Fall back to an empty ErrorDict so self.errors is always
        # dict-like for the responders.
        self.errors = errors or ErrorDict()
class Collection(ResourceBase):
    """
    Resource for a collection of models (queryset).
    """
    def __init__(self, queryset, responder, receiver=None, authentication=None,
                 permitted_methods=None, expose_fields=None, entry_class=None,
                 form_class=None):
        """
        queryset:
            determines the subset of objects (of a Django model)
            that make up this resource
        responder:
            the data format instance that creates HttpResponse
            objects from single or multiple model objects and
            renders forms
        receiver:
            the data format instance that handles POST and
            PUT data
        authentication:
            the authentication instance that checks whether a
            request is authenticated
        permitted_methods:
            the HTTP request methods that are allowed for this
            resource e.g. ('GET', 'PUT')
        expose_fields:
            the model fields that can be accessed
            by the HTTP methods described in permitted_methods
        entry_class:
            class used for entries in create() and get_entry();
            default: class Entry (see below)
        form_class:
            base form class used for data validation and
            conversion in self.create() and Entry.update()
        """
        # Available data
        self.queryset = queryset
        # Input format
        if not receiver:
            receiver = FormReceiver()
        self.receiver = receiver
        # Input validation
        if not form_class:
            form_class = ModelForm
        self.form_class = form_class
        # Output format / responder wiring
        self.responder = responder
        if not expose_fields:
            # Default: expose every model field.
            expose_fields = [field.name for field in queryset.model._meta.fields]
        responder.expose_fields = expose_fields
        # Pre-bind the responder's form-rendering helpers (if it has any)
        # to this collection's queryset and form class.
        if hasattr(responder, 'create_form'):
            responder.create_form = curry(responder.create_form, queryset=queryset, form_class=form_class)
        if hasattr(responder, 'update_form'):
            responder.update_form = curry(responder.update_form, queryset=queryset, form_class=form_class)
        # Resource class for individual objects of the collection
        if not entry_class:
            entry_class = Entry
        self.entry_class = entry_class
        ResourceBase.__init__(self, authentication, permitted_methods)
    def __call__(self, request, *args, **kwargs):
        """
        Redirects to one of the CRUD methods depending
        on the HTTP method of the request. Checks whether
        the requested method is allowed for this resource.
        Catches errors.
        """
        # Check authentication
        if not self.authentication.is_authenticated(request):
            # 401 plus the authentication scheme's challenge headers.
            response = self.responder.error(request, 401)
            challenge_headers = self.authentication.challenge_headers()
            for k,v in challenge_headers.items():
                response[k] = v
            return response
        # Remove queryset cache
        self.queryset = self.queryset._clone()
        # Determine whether the collection or a specific
        # entry is requested. If not specified as a keyword
        # argument, assume that any args/kwargs are used to
        # select a specific entry from the collection.
        # (dict.has_key is Python 2 only.)
        if kwargs.has_key('is_entry'):
            is_entry = kwargs.pop('is_entry')
        else:
            eval_args = tuple(x for x in args if x != '' and x != None)
            eval_kwargs = tuple(x for x in kwargs.values()
                            if x != '' and x != None)
            is_entry = bool(eval_args or eval_kwargs)
        # Redirect either to entry method
        # or to collection method. Catch errors.
        try:
            if is_entry:
                entry = self.get_entry(*args, **kwargs)
                return self.dispatch(request, entry)
            else:
                return self.dispatch(request, self)
        except HttpMethodNotAllowed:
            response = self.responder.error(request, 405)
            response['Allow'] = ', '.join(self.permitted_methods)
            return response
        except (self.queryset.model.DoesNotExist, Http404):
            return self.responder.error(request, 404)
        except InvalidModelData, i:
            return self.responder.error(request, 400, i.errors)
        # NOTE(review): the line below is unreachable -- the try block
        # always returns or raises. InvalidFormData raised by receivers
        # is NOT caught above and propagates; confirm whether it should
        # map to this 400 response.
        # No other methods allowed: 400 Bad Request
        return self.responder.error(request, 400)
    def create(self, request):
        """
        Creates a resource with attributes given by POST, then
        redirects to the resource URI.
        """
        # Create form filled with POST data
        ResourceForm = models.modelform_factory(self.queryset.model, form=self.form_class)
        data = self.receiver.get_post_data(request)
        form = ResourceForm(data)
        # If the data contains no errors, save the model,
        # return a "201 Created" response with the model's
        # URI in the location header and a representation
        # of the model in the response body.
        if form.is_valid():
            new_model = form.save()
            model_entry = self.entry_class(self, new_model)
            response = model_entry.read(request)
            response.status_code = 201
            response['Location'] = model_entry.get_url()
            return response
        # Otherwise return a 400 Bad Request error.
        raise InvalidModelData(form.errors)
    def read(self, request):
        """
        Returns a representation of the queryset.
        The format depends on which responder (e.g. JSONResponder)
        is assigned to this ModelResource instance. Usually called by a
        HTTP request to the factory URI with method GET.
        """
        return self.responder.list(request, self.queryset)
    def get_entry(self, pk_value):
        """
        Returns a single entry retrieved by filtering the
        collection queryset by primary key value.
        """
        model = self.queryset.get(**{self.queryset.model._meta.pk.name : pk_value})
        entry = self.entry_class(self, model)
        return entry
class Entry(object):
    """
    Resource wrapper around a single model instance belonging to a
    Collection.
    """
    def __init__(self, collection, model):
        self.collection = collection
        self.model = model

    def get_url(self):
        """
        Return the URL of this entry (collection view + primary key).
        """
        pk = getattr(self.model, self.model._meta.pk.name)
        return reverse(self.collection, (pk,))

    def create(self, request):
        # A single entry cannot be POSTed to.
        raise Http404

    def read(self, request):
        """
        Return a representation of this single model in the format
        produced by the collection's responder. Usually triggered by a
        GET request on the entry URI.
        """
        return self.collection.responder.element(request, self.model)

    def update(self, request):
        """
        Update the model from the PUT data, then answer with "200 Ok",
        the entry URI in the Location header and a representation of
        the updated model. Raises InvalidModelData on validation errors.
        """
        # Bind a model form to the existing instance with the PUT data.
        ResourceForm = models.modelform_factory(self.model.__class__,
                                                form=self.collection.form_class)
        put_data = self.collection.receiver.get_put_data(request)
        bound_form = ResourceForm(put_data, instance=self.model)
        if not bound_form.is_valid():
            # Translated into a 400 Bad Request by the collection.
            raise InvalidModelData(bound_form.errors)
        bound_form.save()
        response = self.read(request)
        response.status_code = 200
        response['Location'] = self.get_url()
        return response

    def delete(self, request):
        """
        Delete the wrapped model. Usually triggered by a DELETE request
        on the entry URI.
        """
        self.model.delete()
        return HttpResponse(_("Object successfully deleted."),
                            self.collection.responder.mimetype)
| Python |
# Package version as a (major, minor) tuple, i.e. 1.1.
VERSION = (1, 1)
| Python |
import base64
import hashlib
import random
import time

from django.http import HttpResponse
from django.utils.translation import ugettext as _
def djangouser_auth(username, password):
    """
    Validate *username*/*password* against
    django.contrib.auth.models.User.

    Returns True only when the user exists and the password matches.
    """
    from django.contrib.auth.models import User
    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        return False
    return user.check_password(password)
class NoAuthentication(object):
    """
    Authentication stub that permits every request.
    """
    def is_authenticated(self, request):
        # Everyone is welcome.
        return True

    def challenge_headers(self):
        # Nothing to challenge with.
        return {}
class HttpBasicAuthentication(object):
    """
    HTTP/1.0 basic authentication.
    """
    def __init__(self, authfunc=djangouser_auth, realm=_('Restricted Access')):
        """
        authfunc:
            A user-defined function which takes a username and
            password as its first and second arguments respectively
            and returns True if the user is authenticated
        realm:
            An identifier for the authority that is requesting
            authorization
        """
        self.realm = realm
        self.authfunc = authfunc

    def challenge_headers(self):
        """
        Returns the http headers that ask for appropriate
        authorization.
        """
        return {'WWW-Authenticate' : 'Basic realm="%s"' % self.realm}

    def is_authenticated(self, request):
        """
        Checks whether a request carries valid basic-auth credentials.
        """
        # 'in' instead of the deprecated dict.has_key().
        if 'HTTP_AUTHORIZATION' not in request.META:
            return False
        (authmeth, auth) = request.META['HTTP_AUTHORIZATION'].split(' ', 1)
        if authmeth.lower() != 'basic':
            return False
        # base64.b64decode instead of the Python-2-only 'base64' str
        # codec (str.decode('base64') does not exist on Python 3).
        auth = base64.b64decode(auth.strip())
        username, password = auth.split(':', 1)
        return self.authfunc(username=username, password=password)
def digest_password(realm, username, password):
    """
    Construct the HA1 hash needed for HTTP digest authentication:
    MD5 of "username:realm:password".
    """
    # Encode explicitly: hashlib requires bytes on Python 3 and accepts
    # them equally on Python 2 (identical digest for ASCII input).
    raw = '%s:%s:%s' % (username, realm, password)
    return hashlib.md5(raw.encode('utf-8')).hexdigest()
class HttpDigestAuthentication(object):
    """
    HTTP/1.1 digest authentication (RFC 2617).
    Uses code from the Python Paste Project (MIT Licence).
    """
    def __init__(self, authfunc, realm=_('Restricted Access')):
        """
        authfunc:
            A user-defined function which takes a username and
            a realm as its first and second arguments respectively
            and returns the combined md5 hash of username,
            authentication realm and password.
        realm:
            An identifier for the authority that is requesting
            authorization
        """
        self.realm = realm
        self.authfunc = authfunc
        # Maps issued nonce -> last seen nonce count (nc); consulted in
        # is_authenticated() to reject replayed requests.
        self.nonce = {} # prevention of replay attacks
    def get_auth_dict(self, auth_string):
        """
        Splits WWW-Authenticate and HTTP_AUTHORIZATION strings
        into a dictionaries, e.g.
        {
            nonce  : "951abe58eddbb49c1ed77a3a5fb5fc2e",
            opaque : "34de40e4f2e4f4eda2a3952fd2abab16",
            realm  : "realm1",
            qop    : "auth"
        }
        """
        amap = {}
        for itm in auth_string.split(", "):
            (k, v) = [s.strip() for s in itm.split("=", 1)]
            # Values arrive quoted; drop all double quotes.
            amap[k] = v.replace('"', '')
        return amap
    def get_auth_response(self, http_method, fullpath, username, nonce, realm, qop, cnonce, nc):
        """
        Returns the server-computed digest response key.
        http_method:
            The request method, e.g. GET
        username:
            The user to be authenticated
        fullpath:
            The absolute URI to be accessed by the user
        nonce:
            A server-specified data string which should be
            uniquely generated each time a 401 response is made
        realm:
            A string to be displayed to users so they know which
            username and password to use
        qop:
            Indicates the "quality of protection" values supported
            by the server. The value "auth" indicates authentication.
        cnonce:
            An opaque quoted string value provided by the client
            and used by both client and server to avoid chosen
            plaintext attacks, to provide mutual authentication,
            and to provide some message integrity protection.
        nc:
            Hexadecimal request counter
        """
        # HA1 comes pre-hashed from authfunc; HA2 is md5(method:uri).
        # NOTE(review): hashlib.md5 is fed str here -- Python 2 only.
        ha1 = self.authfunc(realm, username)
        ha2 = hashlib.md5('%s:%s' % (http_method, fullpath)).hexdigest()
        if qop:
            chk = "%s:%s:%s:%s:%s:%s" % (ha1, nonce, nc, cnonce, qop, ha2)
        else:
            chk = "%s:%s:%s" % (ha1, nonce, ha2)
        computed_response = hashlib.md5(chk).hexdigest()
        return computed_response
    def challenge_headers(self, stale=''):
        """
        Returns the http headers that ask for appropriate
        authorization.
        """
        # Fresh nonce/opaque per challenge; the nonce is remembered with
        # no request counter seen yet.
        nonce = hashlib.md5(
            "%s:%s" % (time.time(), random.random())).hexdigest()
        opaque = hashlib.md5(
            "%s:%s" % (time.time(), random.random())).hexdigest()
        self.nonce[nonce] = None
        parts = {'realm': self.realm, 'qop': 'auth',
                 'nonce': nonce, 'opaque': opaque }
        if stale:
            parts['stale'] = 'true'
        head = ", ".join(['%s="%s"' % (k, v) for (k, v) in parts.items()])
        return {'WWW-Authenticate':'Digest %s' % head}
    def is_authenticated(self, request):
        """
        Checks whether a request comes from an authorized user.
        """
        # Make sure the request is a valid HttpDigest request
        if not request.META.has_key('HTTP_AUTHORIZATION'):
            return False
        fullpath = request.META['SCRIPT_NAME'] + request.META['PATH_INFO']
        (authmeth, auth) = request.META['HTTP_AUTHORIZATION'].split(" ", 1)
        if authmeth.lower() != 'digest':
            return False
        # Extract auth parameters from request
        amap = self.get_auth_dict(auth)
        try:
            username = amap['username']
            authpath = amap['uri']
            nonce = amap['nonce']
            realm = amap['realm']
            response = amap['response']
            assert authpath.split("?", 1)[0] in fullpath
            assert realm == self.realm
            qop = amap.get('qop', '')
            cnonce = amap.get('cnonce', '')
            nc = amap.get('nc', '00000000')
            if qop:
                assert 'auth' == qop
                assert nonce and nc
        except:
            # NOTE(review): bare except treats any malformed header (or
            # programming error) as "not authenticated" -- confirm this
            # is intended before narrowing it.
            return False
        # Compute response key
        computed_response = self.get_auth_response(request.method, fullpath, username, nonce, realm, qop, cnonce, nc)
        # Compare server-side key with key from client
        # Prevent replay attacks
        if not computed_response or computed_response != response:
            # Wrong credentials: invalidate the nonce as well.
            if nonce in self.nonce:
                del self.nonce[nonce]
            return False
        pnc = self.nonce.get(nonce,'00000000')
        # String comparison of hex counters; relies on the fixed-width
        # zero-padded 8-digit nc format to order correctly.
        if nc <= pnc:
            if nonce in self.nonce:
                del self.nonce[nonce]
            return False # stale = True
        self.nonce[nonce] = nc
        return True
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.