#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines executor tasks handlers for MapReduce implementation."""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
import datetime
import logging
import math
import os
from mapreduce.lib import simplejson
import time
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import context
from mapreduce import model
from mapreduce import quota
from mapreduce import util
# TODO(user): Make this a product of the reader or in quotas.py
_QUOTA_BATCH_SIZE = 20
# The amount of time to perform scanning in one slice. A new slice will be
# scheduled as soon as the current one has been running this long.
_SLICE_DURATION_SEC = 15
# Delay between consecutive controller callback invocations.
_CONTROLLER_PERIOD_SEC = 2
class Error(Exception):
"""Base class for exceptions in this module."""
class NotEnoughArgumentsError(Error):
"""Required argument is missing."""
class NoDataError(Error):
"""There is no data present for a desired input."""
class MapperWorkerCallbackHandler(base_handler.BaseHandler):
"""Callback handler for mapreduce worker task.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
shard_id: id of the shard.
slice_id: id of the slice.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
self._start_time = self._time()
shard_id = self.shard_id()
# TODO(user): Make this prettier
logging.debug("post: shard=%s slice=%s headers=%s",
shard_id, self.slice_id(), self.request.headers)
shard_state, control = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not shard_state:
      # We're letting this task die. It's up to the controller code to
      # reinitialize and restart it.
logging.error("State not found for shard ID %r; shutting down",
shard_id)
return
if control and control.command == model.MapreduceControl.ABORT:
logging.info("Abort command received by shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_ABORTED
shard_state.put()
model.MapreduceControl.abort(spec.mapreduce_id)
return
input_reader = self.input_reader(spec.mapper)
if spec.mapper.params.get("enable_quota", True):
quota_consumer = quota.QuotaConsumer(
quota.QuotaManager(memcache.Client()),
shard_id,
_QUOTA_BATCH_SIZE)
else:
quota_consumer = None
ctx = context.Context(spec, shard_state)
context.Context._set(ctx)
try:
      # Consume quota up front, because we do not want to run a datastore
      # query if there's not enough quota for the shard.
if not quota_consumer or quota_consumer.check():
scan_aborted = False
entity = None
# We shouldn't fetch an entity from the reader if there's not enough
# quota to process it. Perform all quota checks proactively.
if not quota_consumer or quota_consumer.consume():
for entity in input_reader:
if isinstance(entity, db.Model):
shard_state.last_work_item = repr(entity.key())
else:
shard_state.last_work_item = repr(entity)[:100]
scan_aborted = not self.process_entity(entity, ctx)
# Check if we've got enough quota for the next entity.
if (quota_consumer and not scan_aborted and
not quota_consumer.consume()):
scan_aborted = True
if scan_aborted:
break
else:
scan_aborted = True
if not scan_aborted:
logging.info("Processing done for shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
          # We consumed an extra quota item at the end of the for loop.
          # Just be nice here and give it back :)
if quota_consumer:
quota_consumer.put(1)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_SUCCESS
      # TODO(user): Mike said we don't want this to happen in case of an
      # exception while scanning. Figure out when it's appropriate to skip.
ctx.flush()
finally:
context.Context._set(None)
if quota_consumer:
quota_consumer.dispose()
    # Rescheduling work should always be the last statement. It must not
    # happen if any of the code above raised an exception.
if shard_state.active:
self.reschedule(spec, input_reader)
def process_entity(self, entity, ctx):
"""Process a single entity.
Call mapper handler on the entity.
Args:
entity: an entity to process.
ctx: current execution context.
Returns:
True if scan should be continued, False if scan should be aborted.
"""
ctx.counters.increment(context.COUNTER_MAPPER_CALLS)
handler = ctx.mapreduce_spec.mapper.handler
if util.is_generator_function(handler):
for result in handler(entity):
if callable(result):
result(ctx)
else:
try:
if len(result) == 2:
logging.error("Collectors not implemented yet")
else:
logging.error("Got bad output tuple of length %d", len(result))
except TypeError:
logging.error(
"Handler yielded type %s, expected a callable or a tuple",
result.__class__.__name__)
else:
handler(entity)
if self._time() - self._start_time > _SLICE_DURATION_SEC:
logging.debug("Spent %s seconds. Rescheduling",
self._time() - self._start_time)
return False
return True
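  # Illustrative sketch (not part of the framework): process_entity above
  # supports generator-style handlers that yield operations. A hypothetical
  # generator mapper might look like the following (assuming context.Context
  # exposes a mutation_pool, as in this library's context module):
  #
  #   def touch_entity(entity):
  #     def put_op(ctx):                    # yielded callables receive ctx
  #       ctx.mutation_pool.put(entity)
  #     yield put_op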
def shard_id(self):
"""Get shard unique identifier of this task from request.
Returns:
shard identifier as string.
"""
return str(self.request.get("shard_id"))
def slice_id(self):
"""Get slice unique identifier of this task from request.
Returns:
slice identifier as int.
"""
return int(self.request.get("slice_id"))
def input_reader(self, mapper_spec):
"""Get the reader from mapper_spec initialized with the request's state.
Args:
mapper_spec: a mapper spec containing the immutable mapper state.
Returns:
An initialized InputReader.
"""
input_reader_spec_dict = simplejson.loads(
self.request.get("input_reader_state"))
return mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
@staticmethod
def worker_parameters(mapreduce_spec,
shard_id,
slice_id,
input_reader):
"""Fill in mapper worker task parameters.
Returned parameters map is to be used as task payload, and it contains
all the data, required by mapper worker to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
shard_id: id of the shard (part of the whole dataset).
slice_id: id of the slice (part of the shard).
input_reader: InputReader containing the remaining inputs for this
shard.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"shard_id": shard_id,
"slice_id": str(slice_id),
"input_reader_state": input_reader.to_json_str()}
@staticmethod
def get_task_name(shard_id, slice_id):
"""Compute single worker task name.
Args:
shard_id: id of the shard (part of the whole dataset) as string.
slice_id: id of the slice (part of the shard) as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrshard-%s-%s" % (shard_id, slice_id)
def reschedule(self, mapreduce_spec, input_reader):
"""Reschedule worker task to continue scanning work.
Args:
mapreduce_spec: mapreduce specification.
input_reader: remaining input reader to process.
"""
MapperWorkerCallbackHandler.schedule_slice(
self.base_path(), mapreduce_spec, self.shard_id(),
self.slice_id() + 1, input_reader)
@classmethod
def schedule_slice(cls,
base_path,
mapreduce_spec,
shard_id,
slice_id,
input_reader,
queue_name=None,
eta=None,
countdown=None):
"""Schedule slice scanning by adding it to the task queue.
Args:
base_path: base_path of mapreduce request handlers as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
shard_id: current shard id as string.
slice_id: slice id as int.
input_reader: remaining InputReader for given shard.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
"""
task_params = MapperWorkerCallbackHandler.worker_parameters(
mapreduce_spec, shard_id, slice_id, input_reader)
task_name = MapperWorkerCallbackHandler.get_task_name(shard_id, slice_id)
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
queue_name or "default")
try:
taskqueue.Task(url=base_path + "/worker_callback",
params=task_params,
name=task_name,
eta=eta,
countdown=countdown).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class ControllerCallbackHandler(base_handler.BaseHandler):
"""Supervises mapreduce execution.
  Is also responsible for gathering execution status from the shards.
  This task runs "continuously" by re-adding itself to the task queue while
  the mapreduce is still active.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
# TODO(user): Make this logging prettier.
logging.debug("post: id=%s headers=%s",
spec.mapreduce_id, self.request.headers)
state, control = db.get([
model.MapreduceState.get_key_by_job_id(spec.mapreduce_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not state:
logging.error("State not found for mapreduce_id '%s'; skipping",
spec.mapreduce_id)
return
shard_states = model.ShardState.find_by_mapreduce_id(spec.mapreduce_id)
if state.active and len(shard_states) != spec.mapper.shard_count:
# Some shards were lost
logging.error("Incorrect number of shard states: %d vs %d; "
"aborting job '%s'",
len(shard_states), spec.mapper.shard_count,
spec.mapreduce_id)
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
model.MapreduceControl.abort(spec.mapreduce_id)
active_shards = [s for s in shard_states if s.active]
failed_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_FAILED]
aborted_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_ABORTED]
if state.active:
state.active = bool(active_shards)
state.active_shards = len(active_shards)
state.failed_shards = len(failed_shards)
state.aborted_shards = len(aborted_shards)
if (not state.active and control and
control.command == model.MapreduceControl.ABORT):
# User-initiated abort *after* all shards have completed.
logging.info("Abort signal received for job '%s'", spec.mapreduce_id)
state.result_status = model.MapreduceState.RESULT_ABORTED
if not state.active:
state.active_shards = 0
if not state.result_status:
# Set final result status derived from shard states.
if [s for s in shard_states
if s.result_status != model.ShardState.RESULT_SUCCESS]:
state.result_status = model.MapreduceState.RESULT_FAILED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
logging.info("Final result for job '%s' is '%s'",
spec.mapreduce_id, state.result_status)
# We don't need a transaction here, since we change only statistics data,
# and we don't care if it gets overwritten/slightly inconsistent.
self.aggregate_state(state, shard_states)
poll_time = state.last_poll_time
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
if not state.active:
# This is the last execution.
# Enqueue done_callback if needed.
def put_state(state):
state.put()
done_callback = spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
if done_callback:
taskqueue.Task(
url=done_callback,
headers={"Mapreduce-Id": spec.mapreduce_id}).add(
spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE,
"default"),
transactional=True)
db.run_in_transaction(put_state, state)
return
else:
state.put()
processing_rate = int(spec.mapper.params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
self.refill_quotas(poll_time, processing_rate, active_shards)
ControllerCallbackHandler.reschedule(
self.base_path(), spec, self.serial_id() + 1)
def aggregate_state(self, mapreduce_state, shard_states):
"""Update current mapreduce state by aggregating shard states.
Args:
mapreduce_state: current mapreduce state as MapreduceState.
shard_states: all shard states (active and inactive). list of ShardState.
"""
processed_counts = []
mapreduce_state.counters_map.clear()
for shard_state in shard_states:
mapreduce_state.counters_map.add_map(shard_state.counters_map)
processed_counts.append(shard_state.counters_map.get(
context.COUNTER_MAPPER_CALLS))
mapreduce_state.set_processed_counts(processed_counts)
def refill_quotas(self,
last_poll_time,
processing_rate,
active_shard_states):
"""Refill quotas for all active shards.
Args:
last_poll_time: Datetime with the last time the job state was updated.
processing_rate: How many items to process per second overall.
active_shard_states: All active shard states, list of ShardState.
"""
if not active_shard_states:
return
quota_manager = quota.QuotaManager(memcache.Client())
current_time = int(self._time())
last_poll_time = time.mktime(last_poll_time.timetuple())
total_quota_refill = processing_rate * max(0, current_time - last_poll_time)
quota_refill = int(math.ceil(
1.0 * total_quota_refill / len(active_shard_states)))
if not quota_refill:
return
# TODO(user): use batch memcache API to refill quota in one API call.
for shard_state in active_shard_states:
quota_manager.put(shard_state.shard_id, quota_refill)
def serial_id(self):
"""Get serial unique identifier of this task from request.
Returns:
serial identifier as int.
"""
return int(self.request.get("serial_id"))
@staticmethod
def get_task_name(mapreduce_spec, serial_id):
"""Compute single controller task name.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrcontrol-%s-%s" % (
mapreduce_spec.mapreduce_id, serial_id)
@staticmethod
def controller_parameters(mapreduce_spec, serial_id):
"""Fill in controller task parameters.
    The returned parameter map is used as the task payload; it contains all
    the data required by the controller to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"serial_id": str(serial_id)}
@classmethod
def reschedule(cls, base_path, mapreduce_spec, serial_id, queue_name=None):
"""Schedule new update status callback task.
Args:
base_path: mapreduce handlers url base path as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
"""
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
try:
taskqueue.Task(url=base_path + "/controller_callback",
name=task_name, params=task_params,
countdown=_CONTROLLER_PERIOD_SEC).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class KickOffJobHandler(base_handler.BaseHandler):
"""Taskqueue handler which kicks off a mapreduce processing.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
    input_readers: JSON-encoded list of serialized InputReader states.
"""
def post(self):
"""Handles kick off request."""
spec = model.MapreduceSpec.from_json_str(
self._get_required_param("mapreduce_spec"))
input_readers_json = simplejson.loads(
self._get_required_param("input_readers"))
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
mapper_input_reader_class = spec.mapper.input_reader_class()
input_readers = [mapper_input_reader_class.from_json_str(reader_json)
for reader_json in input_readers_json]
KickOffJobHandler._schedule_shards(
spec, input_readers, queue_name, self.base_path())
ControllerCallbackHandler.reschedule(
self.base_path(), spec, queue_name=queue_name, serial_id=0)
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _schedule_shards(cls, spec, input_readers, queue_name, base_path):
"""Prepares shard states and schedules their execution.
Args:
spec: mapreduce specification as MapreduceSpec.
input_readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
"""
# Note: it's safe to re-attempt this handler because:
# - shard state has deterministic and unique key.
# - schedule_slice will fall back gracefully if a task already exists.
shard_states = []
for shard_number, input_reader in enumerate(input_readers):
shard = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard.shard_description = str(input_reader)
shard_states.append(shard)
    # Retrieve already-existing shards.
existing_shard_states = db.get(shard.key() for shard in shard_states)
existing_shard_keys = set(shard.key() for shard in existing_shard_states
if shard is not None)
    # Put only those shards that don't already exist.
db.put(shard for shard in shard_states
if shard.key() not in existing_shard_keys)
for shard_number, input_reader in enumerate(input_readers):
shard_id = model.ShardState.shard_id_from_number(
spec.mapreduce_id, shard_number)
MapperWorkerCallbackHandler.schedule_slice(
base_path, spec, shard_id, 0, input_reader, queue_name=queue_name)
class StartJobHandler(base_handler.JsonHandler):
"""Command handler starts a mapreduce job."""
def handle(self):
"""Handles start request."""
# Mapper spec as form arguments.
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
# Set some mapper param defaults if not present.
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
queue_name = mapper_params["queue_name"] = mapper_params.get(
"queue_name", "default")
# Validate the Mapper spec, handler, and input reader.
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", model._DEFAULT_SHARD_COUNT)))
mapreduce_id = type(self)._start_map(
mapreduce_name,
mapper_spec,
params,
base_path=self.base_path(),
queue_name=queue_name,
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id
def _get_params(self, validator_parameter, name_prefix):
"""Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
"""
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _start_map(cls, name, mapper_spec,
mapreduce_params,
base_path="/mapreduce",
queue_name="default",
eta=None,
countdown=None,
_app=None):
# Check that handler can be instantiated.
mapper_spec.get_handler()
mapper_input_reader_class = mapper_spec.input_reader_class()
mapper_input_readers = mapper_input_reader_class.split_input(mapper_spec)
if not mapper_input_readers:
raise NoDataError("Found no mapper input readers to process.")
mapper_spec.shard_count = len(mapper_input_readers)
state = model.MapreduceState.create_new()
mapreduce_spec = model.MapreduceSpec(
name,
state.key().id_or_name(),
mapper_spec.to_json(),
mapreduce_params)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = mapper_spec.shard_count
if _app:
state.app_id = _app
# TODO(user): Initialize UI fields correctly.
    state.chart_url = ""
state.sparkline_url = ""
def schedule_mapreduce(state, mapper_input_readers, eta, countdown):
state.put()
readers_json = [reader.to_json_str() for reader in mapper_input_readers]
taskqueue.Task(
url=base_path + "/kickoffjob_callback",
params={"mapreduce_spec": state.mapreduce_spec.to_json_str(),
"input_readers": simplejson.dumps(readers_json)},
eta=eta, countdown=countdown).add(queue_name, transactional=True)
# Point of no return: We're actually going to run this job!
db.run_in_transaction(
schedule_mapreduce, state, mapper_input_readers, eta, countdown)
return state.key().id_or_name()
class CleanUpJobHandler(base_handler.JsonHandler):
"""Command to kick off tasks to clean up a job's data."""
def handle(self):
# TODO(user): Have this kick off a task to clean up all MapreduceState,
# ShardState, and MapreduceControl entities for a job ID.
self.json_response["status"] = "This does nothing yet."
class AbortJobHandler(base_handler.JsonHandler):
"""Command to abort a running job."""
def handle(self):
model.MapreduceControl.abort(self.request.get("mapreduce_id"))
self.json_response["status"] = "Abort signal sent."
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for controlling MapReduce execution outside of MapReduce framework."""
__all__ = ["start_map"]
# pylint: disable-msg=C6409
from mapreduce import handlers
from mapreduce import model
def start_map(name,
handler_spec,
reader_spec,
reader_parameters,
shard_count,
              mapreduce_parameters=None,
base_path="/mapreduce",
queue_name="default",
eta=None,
countdown=None,
_app=None):
"""Start a new, mapper-only mapreduce.
Args:
name: mapreduce name. Used only for display purposes.
handler_spec: fully qualified name of mapper handler function/class to call.
reader_spec: fully qualified name of mapper reader to use
reader_parameters: dictionary of parameters to pass to reader. These are
reader-specific.
shard_count: number of shards to create.
mapreduce_parameters: dictionary of mapreduce parameters relevant to the
whole job.
base_path: base path of mapreduce library handler specified in app.yaml.
"/mapreduce" by default.
queue_name: executor queue name to be used for mapreduce tasks.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
Returns:
mapreduce id as string.
"""
  if mapreduce_parameters is None:
    mapreduce_parameters = {}
  mapper_spec = model.MapperSpec(handler_spec, reader_spec, reader_parameters,
                                 shard_count)
return handlers.StartJobHandler._start_map(
name,
mapper_spec,
mapreduce_parameters,
base_path=base_path,
queue_name=queue_name,
eta=eta,
countdown=countdown,
_app=_app)
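# Illustrative usage sketch; the handler and model names below are
# hypothetical:
#
#   from mapreduce import control
#
#   mapreduce_id = control.start_map(
#       name="Touch all users",
#       handler_spec="main.touch_user",
#       reader_spec="mapreduce.input_readers.DatastoreInputReader",
#       reader_parameters={"entity_kind": "models.User"},
#       shard_count=4)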
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for map-reduce implementation.
This module should be specified as a handler for mapreduce URLs in app.yaml:
handlers:
- url: /mapreduce(/.*)?
login: admin
script: mapreduce/main.py
"""
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from mapreduce import handlers
from mapreduce import status
class RedirectHandler(webapp.RequestHandler):
"""Redirects the user back to the status page."""
def get(self):
new_path = self.request.path
if not new_path.endswith('/'):
new_path += '/'
new_path += 'status'
self.redirect(new_path)
def create_application():
"""Create new WSGIApplication and register all handlers.
Returns:
an instance of webapp.WSGIApplication with all mapreduce handlers
registered.
"""
return webapp.WSGIApplication([
# Task queue handlers.
(r".*/worker_callback", handlers.MapperWorkerCallbackHandler),
(r".*/controller_callback", handlers.ControllerCallbackHandler),
(r".*/kickoffjob_callback", handlers.KickOffJobHandler),
# RPC requests with JSON responses
(r".*/command/start_job", handlers.StartJobHandler),
(r".*/command/cleanup_job", handlers.CleanUpJobHandler),
(r".*/command/abort_job", handlers.AbortJobHandler),
(r".*/command/list_configs", status.ListConfigsHandler),
(r".*/command/list_jobs", status.ListJobsHandler),
(r".*/command/get_job_detail", status.GetJobDetailHandler),
# Catch all redirects to status page.
(r"/[^/]+(?:/)?", RedirectHandler),
# UI static files
(r".+/([a-zA-Z0-9]+(?:\.(?:css|js))?)", status.ResourceHandler),
],
debug=True)
APP = create_application()
def main():
util.run_wsgi_app(APP)
if __name__ == "__main__":
main()
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple quota system backed by memcache storage."""
# Memcache namespace to use.
_QUOTA_NAMESPACE = "quota"
# Offset all quota values by this amount since memcache incr/decr
# operate only with unsigned values.
_OFFSET = 2**32
class QuotaManager(object):
"""Simple quota system manager, backed by memcache storage.
  Since memcache storage is not reliable, this quota system is likewise not
  reliable and is best effort only.
Quota is managed by buckets. Each bucket contains a 32-bit int value of
available quota. Buckets should be refilled manually with 'put' method.
It is safe to use a single bucket from multiple clients simultaneously.
"""
def __init__(self, memcache_client):
"""Initialize new instance.
Args:
memcache_client: an instance of memcache client to use.
"""
self.memcache_client = memcache_client
def put(self, bucket, amount):
"""Put amount into quota bucket.
Args:
bucket: quota bucket as string.
      amount: amount to be put into the bucket as int.
"""
self.memcache_client.incr(bucket, delta=amount,
initial_value=_OFFSET, namespace=_QUOTA_NAMESPACE)
def consume(self, bucket, amount, consume_some=False):
"""Consume amount from quota bucket.
Args:
bucket: quota bucket as string.
amount: amount to consume.
consume_some: specifies behavior in case of not enough quota. If False,
the method will leave quota intact and return 0. If True, will try to
consume as much as possible.
Returns:
Amount of quota consumed.
"""
new_quota = self.memcache_client.decr(
bucket, delta=amount, initial_value=_OFFSET, namespace=_QUOTA_NAMESPACE)
if new_quota >= _OFFSET:
return amount
if consume_some and _OFFSET - new_quota < amount:
# we still can consume some
self.put(bucket, _OFFSET - new_quota)
return amount - (_OFFSET - new_quota)
else:
self.put(bucket, amount)
return 0
def get(self, bucket):
"""Get current bucket amount.
Args:
bucket: quota bucket as string.
Returns:
current bucket amount as int.
"""
amount = self.memcache_client.get(bucket, namespace=_QUOTA_NAMESPACE)
if amount:
return int(amount) - _OFFSET
else:
return 0
def set(self, bucket, amount):
"""Set bucket amount.
Args:
bucket: quota bucket as string.
amount: new bucket amount as int.
"""
self.memcache_client.set(bucket, amount + _OFFSET,
namespace=_QUOTA_NAMESPACE)
class QuotaConsumer(object):
"""Quota consumer wrapper for efficient quota consuming/reclaiming.
Quota is consumed in batches and put back in dispose() method.
WARNING: Always call the dispose() method if you need to keep quota
consistent.
"""
def __init__(self, quota_manager, bucket, batch_size):
"""Initialize new instance.
Args:
quota_manager: quota manager to use for quota operations as QuotaManager.
bucket: quota bucket name as string.
batch_size: batch size for quota consuming as int.
"""
self.quota_manager = quota_manager
self.batch_size = batch_size
self.bucket = bucket
self.quota = 0
def consume(self, amount=1):
"""Consume quota.
Args:
amount: amount of quota to be consumed as int.
Returns:
True if quota was successfully consumed, False if there's not enough
quota.
"""
while self.quota < amount:
delta = self.quota_manager.consume(self.bucket, self.batch_size,
consume_some=True)
if not delta:
return False
self.quota += delta
self.quota -= amount
return True
def put(self, amount=1):
"""Put quota back.
Args:
amount: amount of quota as int.
"""
self.quota += amount
def check(self, amount=1):
"""Check that we have enough quota right now.
    This doesn't lock or consume the quota. A subsequent consume() call may
    still fail or succeed regardless of this check's result.
Args:
amount: amount of quota to check.
Returns:
True if we have enough quota to consume specified amount right now. False
otherwise.
"""
if self.quota >= amount:
return True
return self.quota + self.quota_manager.get(self.bucket) >= amount
def dispose(self):
"""Dispose QuotaConsumer and put all actually unconsumed quota back.
This method has to be called for quota consistency!
"""
self.quota_manager.put(self.bucket, self.quota)
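# Illustrative usage sketch; process_next_item is a hypothetical work
# function and memcache is google.appengine.api.memcache:
#
#   manager = QuotaManager(memcache.Client())
#   manager.put("shard-1", 100)            # refill the bucket
#   consumer = QuotaConsumer(manager, "shard-1", 10)
#   try:
#     while consumer.consume():            # one quota unit per work item
#       process_next_item()
#   finally:
#     consumer.dispose()                   # put unconsumed quota back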
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes which are used to communicate between parts of implementation.
These model classes are describing mapreduce, its current state and
communication messages. They are either stored in the datastore or
serialized to/from json and passed around with other means.
"""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
__all__ = ["JsonMixin", "JsonProperty", "MapreduceState", "MapperSpec",
"MapreduceControl", "MapreduceSpec", "ShardState", "CountersMap"]
import copy
import datetime
import logging
import math
import random
from mapreduce.lib import simplejson
import time
import types
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.ext import db
from mapreduce import context
from mapreduce import util
from mapreduce.lib.graphy.backends import google_chart_api
# Default rate of processed entities per second.
_DEFAULT_PROCESSING_RATE_PER_SEC = 100
# Default number of shards to have.
_DEFAULT_SHARD_COUNT = 8
class JsonMixin(object):
"""Simple, stateless json utilities mixin.
Requires class to implement two methods:
    to_json(self): convert data to a json-compatible data structure (dict,
list, strings, numbers)
@classmethod from_json(cls, json): load data from json-compatible structure.
"""
def to_json_str(self):
"""Convert data to json string representation.
Returns:
json representation as string.
"""
return simplejson.dumps(self.to_json(), sort_keys=True)
@classmethod
def from_json_str(cls, json_str):
"""Convert json string representation into class instance.
Args:
json_str: json representation as string.
Returns:
New instance of the class with data loaded from json string.
"""
return cls.from_json(simplejson.loads(json_str))
class JsonProperty(db.UnindexedProperty):
"""Property type for storing json representation of data.
Requires data types to implement two methods:
    to_json(self): convert data to a json-compatible data structure (dict,
list, strings, numbers)
@classmethod from_json(cls, json): load data from json-compatible structure.
"""
def __init__(self, data_type, default=None, **kwargs):
"""Constructor.
Args:
data_type: underlying data type as class.
      default: default value for the property. The value is deep copied
        for each model instance.
kwargs: remaining arguments.
"""
kwargs["default"] = default
super(JsonProperty, self).__init__(**kwargs)
self.data_type = data_type
def get_value_for_datastore(self, model_instance):
"""Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value.
"""
value = super(JsonProperty, self).get_value_for_datastore(model_instance)
if not value:
return None
return datastore_types.Text(simplejson.dumps(
value.to_json(), sort_keys=True))
def make_value_from_datastore(self, value):
"""Convert value from datastore representation.
Args:
value: datastore value.
Returns:
value to store in the model.
"""
if value is None:
return None
return self.data_type.from_json(simplejson.loads(value))
def validate(self, value):
"""Validate value.
Args:
value: model value.
Returns:
Whether the specified value is valid data type value.
Raises:
BadValueError: when value is not of self.data_type type.
"""
if value is not None and not isinstance(value, self.data_type):
raise datastore_errors.BadValueError(
"Property %s must be convertible to a %s instance (%s)" %
(self.name, self.data_type, value))
return super(JsonProperty, self).validate(value)
def empty(self, value):
"""Checks if value is empty.
Args:
value: model value.
Returns:
      True if the passed value is empty.
"""
return not value
def default_value(self):
"""Create default model value.
If default option was specified, then it will be deeply copied.
None otherwise.
Returns:
default model value.
"""
if self.default:
return copy.deepcopy(self.default)
else:
return None
# Ridiculous future UNIX epoch time, 500 years from now.
_FUTURE_TIME = 2**34
def _get_descending_key(gettime=time.time, getrandint=random.randint):
"""Returns a key name lexically ordered by time descending.
This lets us have a key name for use with Datastore entities which returns
rows in time descending order when it is scanned in lexically ascending order,
allowing us to bypass index building for descending indexes.
Args:
gettime: Used for testing.
getrandint: Used for testing.
Returns:
A string with a time descending key.
"""
now_descending = int((_FUTURE_TIME - gettime()) * 100)
tie_breaker = getrandint(0, 100)
return "%d%d" % (now_descending, tie_breaker)
class CountersMap(JsonMixin):
"""Maintains map from counter name to counter value.
  The class is used to provide basic arithmetic on counter values (bulk
  add/subtract), to increment individual values, and to store/load data from
  json.
"""
def __init__(self, initial_map=None):
"""Constructor.
Args:
initial_map: initial counter values map from counter name (string) to
counter value (int).
"""
if initial_map:
self.counters = initial_map
else:
self.counters = {}
def __repr__(self):
"""Compute string representation."""
return "mapreduce.model.CountersMap(%r)" % self.counters
def get(self, counter_name):
"""Get current counter value.
Args:
counter_name: counter name as string.
Returns:
current counter value as int. 0 if counter was not set.
"""
return self.counters.get(counter_name, 0)
def increment(self, counter_name, delta):
"""Increment counter value.
Args:
      counter_name: counter name as string.
      delta: increment delta as int.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
new_value = current_value + delta
self.counters[counter_name] = new_value
return new_value
def add_map(self, counters_map):
"""Add all counters from the map.
For each counter in the passed map, adds its value to the counter in this
map.
Args:
counters_map: CounterMap instance to add.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, counters_map.counters[counter_name])
def sub_map(self, counters_map):
"""Subtracts all counters from the map.
    For each counter in the passed map, subtracts its value from the counter in
this map.
Args:
counters_map: CounterMap instance to subtract.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, -counters_map.counters[counter_name])
def clear(self):
"""Clear all values."""
self.counters = {}
def to_json(self):
"""Serializes all the data in this map into json form.
Returns:
json-compatible data representation.
"""
return {"counters": self.counters}
@classmethod
def from_json(cls, json):
"""Create new CountersMap from the json data structure, encoded by to_json.
Args:
      json: json representation of CountersMap.
Returns:
an instance of CountersMap with all data deserialized from json.
"""
counters_map = cls()
counters_map.counters = json["counters"]
return counters_map
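# Illustrative sketch: counters support increments, bulk arithmetic, and a
# json round-trip.
#
#   cmap = CountersMap({"mapper_calls": 10})
#   cmap.increment("mapper_calls", 5)                 # -> 15
#   restored = CountersMap.from_json(cmap.to_json())
#   assert restored.get("mapper_calls") == 15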
class MapperSpec(JsonMixin):
"""Contains a specification for the mapper phase of the mapreduce.
  A MapperSpec instance can be changed only during the mapreduce start-up
  process; it remains immutable for the rest of the execution. MapperSpec is
passed as a payload to all mapreduce tasks in JSON encoding as part of
MapreduceSpec.
Specifying mapper handlers:
* '<module_name>.<class_name>' - __call__ method of class instance will be
called
* '<module_name>.<function_name>' - function will be called.
* '<module_name>.<class_name>.<method_name>' - class will be instantiated
and method called.
"""
def __init__(self, handler_spec, input_reader_spec, params, shard_count):
"""Creates a new MapperSpec.
Args:
handler_spec: handler specification as string (see class doc for
details).
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
Properties:
handler_spec: name of handler class/function to use.
shard_count: number of shards to process in parallel.
handler: cached instance of mapper handler as callable.
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
"""
self.handler_spec = handler_spec
self.__handler = None
self.input_reader_spec = input_reader_spec
self.shard_count = shard_count
self.params = params
def get_handler(self):
"""Get mapper handler instance.
Returns:
cached handler instance as callable.
"""
if self.__handler is None:
resolved_spec = util.for_name(self.handler_spec)
if isinstance(resolved_spec, type):
        # create a new instance if the spec resolved to a class
self.__handler = resolved_spec()
elif isinstance(resolved_spec, types.MethodType):
# bind the method
self.__handler = getattr(resolved_spec.im_class(),
resolved_spec.__name__)
else:
self.__handler = resolved_spec
return self.__handler
handler = property(get_handler)
def input_reader_class(self):
"""Get input reader class.
Returns:
input reader class object.
"""
return util.for_name(self.input_reader_spec)
def to_json(self):
"""Serializes this MapperSpec into a json-izable object."""
return {
"mapper_handler_spec": self.handler_spec,
"mapper_input_reader": self.input_reader_spec,
"mapper_params": self.params,
"mapper_shard_count": self.shard_count,
}
@classmethod
def from_json(cls, json):
"""Creates MapperSpec from a dict-like object."""
return cls(json["mapper_handler_spec"],
json["mapper_input_reader"],
json["mapper_params"],
json["mapper_shard_count"])
class MapreduceSpec(JsonMixin):
"""Contains a specification for the whole mapreduce.
  A MapreduceSpec instance can be changed only during the mapreduce start-up
  process; it remains immutable for the rest of the execution. MapreduceSpec is
passed as a payload to all mapreduce tasks in json encoding.
"""
  # URL to call when the mapreduce finishes its execution.
PARAM_DONE_CALLBACK = "done_callback"
  # Queue to use for the done callback.
PARAM_DONE_CALLBACK_QUEUE = "done_callback_queue"
def __init__(self,
name,
mapreduce_id,
mapper_spec,
               params=None):
"""Create new MapreduceSpec.
Args:
name: The name of this mapreduce job type.
mapreduce_id: ID of the mapreduce.
mapper_spec: JSON-encoded string containing a MapperSpec.
params: dictionary of additional mapreduce parameters.
Properties:
name: The name of this mapreduce job type.
mapreduce_id: unique id of this mapreduce as string.
mapper: This MapreduceSpec's instance of MapperSpec.
params: dictionary of additional mapreduce parameters.
"""
self.name = name
self.mapreduce_id = mapreduce_id
self.mapper = MapperSpec.from_json(mapper_spec)
    self.params = params or {}
def to_json(self):
"""Serializes all data in this mapreduce spec into json form.
Returns:
data in json format.
"""
mapper_spec = self.mapper.to_json()
return {
"name": self.name,
"mapreduce_id": self.mapreduce_id,
"mapper_spec": mapper_spec,
"params": self.params,
}
@classmethod
def from_json(cls, json):
"""Create new MapreduceSpec from the json, encoded by to_json.
Args:
json: json representation of MapreduceSpec.
Returns:
an instance of MapreduceSpec with all data deserialized from json.
"""
mapreduce_spec = cls(json["name"],
json["mapreduce_id"],
json["mapper_spec"],
json.get("params"))
return mapreduce_spec
class MapreduceState(db.Model):
"""Holds accumulated state of mapreduce execution.
MapreduceState is stored in datastore with a key name equal to the
mapreduce ID. Only controller tasks can write to MapreduceState.
Properties:
mapreduce_spec: cached deserialized MapreduceSpec instance. read-only
active: if we have this mapreduce running right now
last_poll_time: last time controller job has polled this mapreduce.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
chart_url: last computed mapreduce status chart url. This chart displays the
progress of all the shards the best way it can.
sparkline_url: last computed mapreduce status chart url in small format.
result_status: If not None, the final status of the job.
active_shards: How many shards are still processing.
start_time: When the job started.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
mapreduce_spec = JsonProperty(MapreduceSpec, indexed=False)
active = db.BooleanProperty(default=True, indexed=False)
last_poll_time = db.DateTimeProperty(required=True)
counters_map = JsonProperty(CountersMap, default=CountersMap(), indexed=False)
app_id = db.StringProperty(required=False, indexed=True)
# For UI purposes only.
chart_url = db.TextProperty(default="")
sparkline_url = db.TextProperty(default="")
result_status = db.StringProperty(required=False, choices=_RESULTS)
active_shards = db.IntegerProperty(default=0, indexed=False)
failed_shards = db.IntegerProperty(default=0, indexed=False)
aborted_shards = db.IntegerProperty(default=0, indexed=False)
start_time = db.DateTimeProperty(auto_now_add=True)
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a Job.
Args:
mapreduce_id: The job to retrieve.
Returns:
Datastore Key that can be used to fetch the MapreduceState.
"""
return db.Key.from_path(cls.kind(), mapreduce_id)
def set_processed_counts(self, shards_processed):
"""Updates a chart url to display processed count for each shard.
Args:
shards_processed: list of integers with number of processed entities in
each shard
"""
chart = google_chart_api.BarChart(shards_processed)
if self.mapreduce_spec and shards_processed:
chart.bottom.labels = [
str(x) for x in xrange(self.mapreduce_spec.mapper.shard_count)]
chart.left.labels = ['0', str(max(shards_processed))]
chart.left.min = 0
self.chart_url = chart.display.Url(300, 200)
def get_processed(self):
"""Number of processed entities.
Returns:
The total number of processed entities as int.
"""
return self.counters_map.get(context.COUNTER_MAPPER_CALLS)
processed = property(get_processed)
@staticmethod
def create_new(getkeyname=_get_descending_key,
gettime=datetime.datetime.now):
"""Create a new MapreduceState.
Args:
getkeyname: Used for testing.
gettime: Used for testing.
"""
state = MapreduceState(key_name=getkeyname(),
last_poll_time=gettime())
state.set_processed_counts([])
return state
class ShardState(db.Model):
"""Single shard execution state.
The shard state is stored in the datastore and is later aggregated by
controller task. Shard key_name is equal to shard_id.
Properties:
active: if we have this shard still running as boolean.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
mapreduce_id: unique id of the mapreduce.
shard_id: unique id of this shard as string.
shard_number: ordered number for this shard.
result_status: If not None, the final status of this shard.
update_time: The last time this shard state was updated.
shard_description: A string description of the work this shard will do.
last_work_item: A string description of the last work item processed.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
active = db.BooleanProperty(default=True, indexed=False)
counters_map = JsonProperty(CountersMap, default=CountersMap(), indexed=False)
result_status = db.StringProperty(choices=_RESULTS, indexed=False)
# For UI purposes only.
mapreduce_id = db.StringProperty(required=True)
update_time = db.DateTimeProperty(auto_now=True, indexed=False)
shard_description = db.TextProperty(default="")
last_work_item = db.TextProperty(default="")
def get_shard_number(self):
"""Gets the shard number from the key name."""
return int(self.key().name().split("-")[-1])
shard_number = property(get_shard_number)
def get_shard_id(self):
"""Returns the shard ID."""
return self.key().name()
shard_id = property(get_shard_id)
@classmethod
def shard_id_from_number(cls, mapreduce_id, shard_number):
"""Get shard id by mapreduce id and shard number.
Args:
mapreduce_id: mapreduce id as string.
shard_number: shard number to compute id for as int.
Returns:
shard id as string.
"""
return "%s-%d" % (mapreduce_id, shard_number)
@classmethod
def get_key_by_shard_id(cls, shard_id):
"""Retrieves the Key for this ShardState.
Args:
shard_id: The shard ID to fetch.
Returns:
      The Datastore key to use to retrieve this ShardState.
"""
return db.Key.from_path(cls.kind(), shard_id)
@classmethod
def get_by_shard_id(cls, shard_id):
"""Get shard state from datastore by shard_id.
Args:
shard_id: shard id as string.
Returns:
ShardState for given shard id or None if it's not found.
"""
return cls.get_by_key_name(shard_id)
@classmethod
def find_by_mapreduce_id(cls, mapreduce_id):
"""Find all shard states for given mapreduce.
Args:
mapreduce_id: mapreduce id.
Returns:
iterable of all ShardState for given mapreduce id.
"""
return cls.all().filter("mapreduce_id =", mapreduce_id).fetch(99999)
@classmethod
def create_new(cls, mapreduce_id, shard_number):
"""Create new shard state.
Args:
mapreduce_id: unique mapreduce id as string.
shard_number: shard number for which to create shard state.
Returns:
new instance of ShardState ready to put into datastore.
"""
shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
state = cls(key_name=shard_id,
mapreduce_id=mapreduce_id)
return state
class MapreduceControl(db.Model):
"""Datastore entity used to control mapreduce job execution.
Only one command may be sent to jobs at a time.
Properties:
command: The command to send to the job.
"""
ABORT = "abort"
_COMMANDS = frozenset([ABORT])
_KEY_NAME = "command"
command = db.TextProperty(choices=_COMMANDS, required=True)
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a mapreduce ID.
Args:
mapreduce_id: The job to fetch.
Returns:
Datastore Key for the command for the given job ID.
"""
return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
@classmethod
def abort(cls, mapreduce_id):
"""Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
"""
cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
command=cls.ABORT).put()
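# Illustrative sketch: sending the abort command to a job and reading it
# back; the job id below is hypothetical.
#
#   MapreduceControl.abort("12345")
#   key = MapreduceControl.get_key_by_job_id("12345")
#   assert db.get(key).command == MapreduceControl.ABORT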
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines input readers for MapReduce."""
# pylint: disable-msg=C6409
import logging
import math
import StringIO
import zipfile
from google.appengine.api import datastore
from mapreduce.lib import blobstore
from google.appengine.ext import db
from mapreduce.lib import key_range
from mapreduce import util
from mapreduce.model import JsonMixin
class Error(Exception):
"""Base-class for exceptions in this module."""
class BadReaderParamsError(Error):
"""The input parameters to a reader were invalid."""
class InputReader(JsonMixin):
"""Abstract base class for input readers.
InputReaders have the following properties:
* They are created by using the split_input method to generate a set of
InputReaders from a MapperSpec.
* They generate inputs to the mapper via the iterator interface.
* After creation, they can be serialized and resumed using the JsonMixin
interface.
* They are cast to string for a user-readable description; it may be
valuable to implement __str__.
"""
# Mapreduce parameters.
_APP_PARAM = "_app"
MAPPER_PARAMS = "mapper_params"
def __iter__(self):
return self
def next(self):
"""Returns the next input from this input reader as a key, value pair.
Returns:
The next input from this input reader.
"""
raise NotImplementedError
@classmethod
def from_json(cls, input_shard_state):
"""Creates an instance of the InputReader for the given input shard state.
Args:
input_shard_state: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
raise NotImplementedError
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
raise NotImplementedError
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
raise NotImplementedError
# TODO(user): Use cursor API as soon as we have it available.
class DatastoreInputReader(InputReader):
"""Represents a range in query results.
DatastoreInputReader yields model instances from the entities in a given key
range. Iterating over DatastoreInputReader changes its range past consumed
entries.
The class shouldn't be instantiated directly. Use the split_input class method
instead.
"""
# Number of entities to fetch at once while doing scanning.
_BATCH_SIZE = 50
# Maximum number of shards we'll create.
_MAX_SHARD_COUNT = 256
# Mapreduce parameters.
ENTITY_KIND_PARAM = "entity_kind"
KEYS_ONLY_PARAM = "keys_only"
BATCH_SIZE_PARAM = "batch_size"
KEY_RANGE_PARAM = "key_range"
# TODO(user): Add support for arbitrary queries. It's not possible to
# support them without cursors since right now you can't even serialize query
# definition.
def __init__(self, entity_kind, key_range_param, mapper_params):
"""Create new DatastoreInputReader object.
    This is an internal constructor. Use the split_input class method instead.
Args:
entity_kind: entity kind as string.
key_range_param: key range to process as key_range.KeyRange.
mapper_params: mapper parameters as defined by user.
"""
self._entity_kind = entity_kind
self._key_range = key_range_param
self._mapper_params = mapper_params
self._batch_size = int(self._mapper_params.get(
self.BATCH_SIZE_PARAM, self._BATCH_SIZE))
def __iter__(self):
"""Create a generator for model instances for entities.
Iterating through entities moves query range past the consumed entities.
Yields:
next model instance.
"""
while True:
query = self._key_range.make_ascending_query(
util.for_name(self._entity_kind))
results = query.fetch(limit=self._batch_size)
if not results:
break
for model_instance in results:
key = model_instance.key()
self._key_range.advance(key)
yield model_instance
# TODO(user): use query splitting functionality when it becomes available
# instead.
@classmethod
def _split_input_from_params(cls, app, entity_kind_name,
params, shard_count):
"""Return input reader objects. Helper for split_input."""
raw_entity_kind = util.get_short_name(entity_kind_name)
# we use datastore.Query instead of ext.db.Query here, because we can't
# erase ordering on db.Query once we set it.
ds_query = datastore.Query(kind=raw_entity_kind, _app=app, keys_only=True)
ds_query.Order("__key__")
first_entity_key_list = ds_query.Get(1)
if not first_entity_key_list:
return []
first_entity_key = first_entity_key_list[0]
ds_query.Order(("__key__", datastore.Query.DESCENDING))
try:
last_entity_key, = ds_query.Get(1)
except db.NeedIndexError, e:
# TODO(user): Show this error in the worker log, not the app logs.
logging.warning("Cannot create accurate approximation of keyspace, "
"guessing instead. Please address this problem: %s", e)
# TODO(user): Use a key-end hint from the user input parameters
# in this case, in the event the user has a good way of figuring out
# the range of the keyspace.
last_entity_key = key_range.KeyRange.guess_end_key(raw_entity_kind,
first_entity_key)
full_keyrange = key_range.KeyRange(
first_entity_key, last_entity_key, None, True, True, _app=app)
key_ranges = [full_keyrange]
number_of_half_splits = int(math.floor(math.log(shard_count, 2)))
for _ in range(0, number_of_half_splits):
new_ranges = []
for r in key_ranges:
new_ranges += r.split_range(1)
key_ranges = new_ranges
return [cls(entity_kind_name, r, params) for r in key_ranges]
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best it can to split the whole query result set into equal
shards. Because a perfect split is difficult to make, the resulting
shards' sizes might differ significantly from each other. The actual
number of shards might also be less than requested (even 1), though it
is never greater.
The current implementation splits in key-lexicographic order. It requires
that the query not specify any __key__-based ordering. If an index for
the query.order('-__key__') query is not present, an inaccurate guess at
sharding will be made by splitting the full key range.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May also have 'batch_size' in the params to specify the number
of entities to process in each batch.
Returns:
A list of InputReader objects of length <= number_of_shards. These
may be DatastoreInputReader or DatastoreKeyInputReader objects.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = mapper_spec.params
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
shard_count = mapper_spec.shard_count
app = params.get(cls._APP_PARAM)
# keys_only remains for backwards compatibility. It may go away.
keys_only = util.parse_bool(params.get(cls.KEYS_ONLY_PARAM, False))
if keys_only:
raise BadReaderParamsError("The keys_only parameter is obsolete. "
"Use DatastoreKeyInputReader instead.")
# Fail fast if Model cannot be located.
util.for_name(entity_kind_name)
return cls._split_input_from_params(
app, entity_kind_name, params, shard_count)
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
json_dict = {self.KEY_RANGE_PARAM: self._key_range.to_json(),
self.ENTITY_KIND_PARAM: self._entity_kind,
self.MAPPER_PARAMS: self._mapper_params}
return json_dict
def __str__(self):
"""Returns the string representation of this DatastoreInputReader."""
return repr(self._key_range)
@classmethod
def from_json(cls, json):
"""Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
"""
query_range = cls(json[cls.ENTITY_KIND_PARAM],
key_range.KeyRange.from_json(json[cls.KEY_RANGE_PARAM]),
json[cls.MAPPER_PARAMS])
return query_range
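# Illustrative note (not part of the library): _split_input_from_params halves
# the full key range floor(log2(shard_count)) times, so it returns at most
# 2**floor(log2(shard_count)) readers, the largest power of two that does not
# exceed shard_count. Requesting 13 shards, for example, yields at most 8.
def _example_effective_shard_count(shard_count):
  """Sketch of the shard count the binary splitting above can produce."""
  return 2 ** int(math.floor(math.log(shard_count, 2)))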
class DatastoreKeyInputReader(DatastoreInputReader):
"""An input reader which takes a Kind and yields Keys for that kind."""
def __iter__(self):
"""Create a generator for keys in the range.
Iterating through entries moves the query range past the consumed entries.
Yields:
next entry.
"""
while True:
raw_entity_kind = util.get_short_name(self._entity_kind)
query = self._key_range.make_ascending_datastore_query(
raw_entity_kind, keys_only=True)
results = query.Get(limit=self._batch_size)
if not results:
break
for key in results:
self._key_range.advance(key)
yield key
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best it can to split the whole query result set into equal
shards. Because a perfect split is difficult to make, the resulting
shards' sizes might differ significantly from each other. The actual
number of shards might also be less than requested (even 1), though it
is never greater.
The current implementation splits in key-lexicographic order. It requires
that the query not specify any __key__-based ordering. If an index for
the query.order('-__key__') query is not present, an inaccurate guess at
sharding will be made by splitting the full key range.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May also have 'batch_size' in the params to specify the number
of entities to process in each batch.
Returns:
A list of DatastoreKeyInputReader objects of length <= number_of_shards.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = mapper_spec.params
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
shard_count = mapper_spec.shard_count
app = params.get(cls._APP_PARAM)
return cls._split_input_from_params(
app, entity_kind_name, params, shard_count)
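# Illustrative sketch (not part of the library): a user mapper suited to
# DatastoreKeyInputReader. The reader yields datastore Keys, so work such as
# bulk deletion can proceed without deserializing whole entities.
# "delete_by_key" is a hypothetical handler name, not part of this module.
def delete_by_key(key):
  """Mapper handler; key is a datastore Key yielded by the reader."""
  db.delete(key)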
class DatastoreEntityInputReader(DatastoreInputReader):
"""An input reader which yields low level datastore entities for a kind."""
def __iter__(self):
"""Create a generator for low level entities in the range.
Iterating through entries moves the query range past the consumed entries.
Yields:
next entry.
"""
while True:
raw_entity_kind = util.get_short_name(self._entity_kind)
query = self._key_range.make_ascending_datastore_query(raw_entity_kind)
results = query.Get(limit=self._batch_size)
if not results:
break
for entity in results:
self._key_range.advance(entity.key())
yield entity
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best it can to split the whole query result set into equal
shards. Because a perfect split is difficult to make, the resulting
shards' sizes might differ significantly from each other. The actual
number of shards might also be less than requested (even 1), though it
is never greater.
The current implementation splits in key-lexicographic order. It requires
that the query not specify any __key__-based ordering. If an index for
the query.order('-__key__') query is not present, an inaccurate guess at
sharding will be made by splitting the full key range.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May also have 'batch_size' in the params to specify the number
of entities to process in each batch.
Returns:
List of DatastoreEntityInputReader objects of length <= number_of_shards.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = mapper_spec.params
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
shard_count = mapper_spec.shard_count
app = params.get(cls._APP_PARAM)
return cls._split_input_from_params(
app, entity_kind_name, params, shard_count)
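# Illustrative sketch (not part of the library): how any of the datastore
# readers above survives a task-queue hop. to_json captures the advanced key
# range, so the reader rebuilt by from_json resumes where the previous slice
# stopped.
def _example_checkpoint_roundtrip(reader):
  """Sketch of the serialize/deserialize cycle between slices."""
  state = reader.to_json()  # key range, entity kind and mapper params
  return reader.__class__.from_json(state)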
class BlobstoreLineInputReader(InputReader):
"""Input reader for a newline delimited blob in Blobstore."""
# TODO(user): Should we set this based on MAX_BLOB_FETCH_SIZE?
_BLOB_BUFFER_SIZE = 64000
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
# Serialization parameters.
INITIAL_POSITION_PARAM = "initial_position"
END_POSITION_PARAM = "end_position"
BLOB_KEY_PARAM = "blob_key"
def __init__(self, blob_key, start_position, end_position):
"""Initializes this instance with the given blob key and character range.
This BlobstoreLineInputReader will read from the first record starting
strictly after start_position until the first record ending at or after
end_position (exclusive). As an exception, if start_position is 0, then
this InputReader starts reading at the first record.
Args:
blob_key: the BlobKey that this input reader is processing.
start_position: the position to start reading at.
end_position: a position in the last record to read.
"""
self._blob_key = blob_key
self._blob_reader = blobstore.BlobReader(blob_key,
self._BLOB_BUFFER_SIZE,
start_position)
self._end_position = end_position
self._has_iterated = False
self._read_before_start = bool(start_position)
def next(self):
"""Returns the next input from as an (offset, line) tuple."""
self._has_iterated = True
if self._read_before_start:
self._blob_reader.readline()
self._read_before_start = False
start_position = self._blob_reader.tell()
if start_position >= self._end_position:
raise StopIteration()
line = self._blob_reader.readline()
if not line:
raise StopIteration()
return start_position, line.rstrip("\n")
def to_json(self):
"""Returns an json-compatible input shard spec for remaining inputs."""
new_pos = self._blob_reader.tell()
if self._has_iterated:
new_pos -= 1
return {self.BLOB_KEY_PARAM: self._blob_key,
self.INITIAL_POSITION_PARAM: new_pos,
self.END_POSITION_PARAM: self._end_position}
def __str__(self):
"""Returns the string representation of this BlobstoreLineInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._blob_reader.tell(), self._end_position)
@classmethod
def from_json(cls, json):
"""Instantiates an instance of this InputReader for the given shard spec."""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.INITIAL_POSITION_PARAM],
json[cls.END_POSITION_PARAM])
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'blob_keys' parameter with one or more blob keys.
Returns:
A list of BlobstoreInputReaders corresponding to the specified shards.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = mapper_spec.params
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
blob_sizes = {}
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
blob_sizes[blob_key] = blob_info.size
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
shards_per_blob = shard_count // len(blob_keys)
if shards_per_blob == 0:
shards_per_blob = 1
chunks = []
for blob_key, blob_size in blob_sizes.items():
blob_chunk_size = blob_size // shards_per_blob
for i in xrange(shards_per_blob - 1):
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
cls.END_POSITION_PARAM: blob_size}))
return chunks
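# Illustrative sketch (not part of the library): two adjacent shards over one
# hypothetical blob. A reader with a nonzero start_position discards the
# (possibly partial) line it lands in, while its neighbor reads through
# end_position to finish that same line, so each line is handled exactly once.
def _example_adjacent_line_shards(blob_key, blob_size):
  """Sketch of a two-way split of a single blob."""
  middle = blob_size // 2
  return [BlobstoreLineInputReader(blob_key, 0, middle),
          BlobstoreLineInputReader(blob_key, middle, blob_size)]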
class BlobstoreZipInputReader(InputReader):
"""Input reader for files from a zip archive stored in the Blobstore.
Each instance of the reader will read the TOC from the end of the zip file,
and then only the contained files for which it is responsible.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Mapreduce parameters.
BLOB_KEY_PARAM = "blob_key"
START_INDEX_PARAM = "start_index"
END_INDEX_PARAM = "end_index"
def __init__(self, blob_key, start_index, end_index,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipInputReader will read from the file with index start_index
up to but not including the file with index end_index.
Args:
blob_key: the BlobKey that this input reader is processing.
start_index: the index of the first file to read.
end_index: the index of the first file that will not be read.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_index = start_index
self._end_index = end_index
self._reader = _reader
self._zip = None
self._entries = None
def next(self):
"""Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._zip.read(entry.filename))
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_INDEX_PARAM],
json[cls.END_INDEX_PARAM])
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_INDEX_PARAM: self._start_index,
self.END_INDEX_PARAM: self._end_index}
def __str__(self):
"""Returns the string representation of this BlobstoreZipInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._start_index, self._end_index)
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input shard states for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_key' parameter with one blob key.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning files within the zip.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = mapper_spec.params
if cls.BLOB_KEY_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
blob_key = params[cls.BLOB_KEY_PARAM]
zip_input = zipfile.ZipFile(_reader(blob_key))
files = zip_input.infolist()
total_size = sum(x.file_size for x in files)
num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
size_per_shard = total_size // num_shards
# Break the list of files into sublists, each of approximately
# size_per_shard bytes.
shard_start_indexes = [0]
current_shard_size = 0
for i, fileinfo in enumerate(files):
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
shard_start_indexes.append(i + 1)
current_shard_size = 0
if shard_start_indexes[-1] != len(files):
shard_start_indexes.append(len(files))
return [cls(blob_key, start_index, end_index, _reader)
for start_index, end_index
in zip(shard_start_indexes, shard_start_indexes[1:])]
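# Illustrative sketch (not part of the library): a user mapper for
# BlobstoreZipInputReader. Each input is a (zipfile.ZipInfo, opener) pair;
# opener is a zero-argument callable, so the member body is only read when
# the handler actually asks for it.
def measure_zip_member(input_tuple):
  """Mapper handler; checks a member's body against its declared size."""
  zipinfo, opener = input_tuple
  body = opener()  # lazily extracts this member from the archive
  assert len(body) == zipinfo.file_size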
class BlobstoreZipLineInputReader(InputReader):
"""Input reader for newline delimited files in zip archives from Blobstore.
This has the same external interface as the BlobstoreLineInputReader, in
that it takes a list of blobs as its input and yields lines to the reader.
However, the blobs themselves are expected to be zip archives of
line-delimited files rather than the files themselves.
This is useful because many line-delimited files benefit greatly from
compression.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
# Serialization parameters.
BLOB_KEY_PARAM = "blob_key"
START_FILE_INDEX_PARAM = "start_file_index"
END_FILE_INDEX_PARAM = "end_file_index"
OFFSET_PARAM = "offset"
def __init__(self, blob_key, start_file_index, end_file_index, offset,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipLineInputReader will read from the file with index
start_file_index up to but not including the file with index end_file_index.
It will return lines starting at offset within file[start_file_index].
Args:
blob_key: the BlobKey that this input reader is processing.
start_file_index: the index of the first file to read within the zip.
end_file_index: the index of the first file that will not be read.
offset: the byte offset within blob_key.zip[start_file_index] to start
reading. The reader will continue to the end of the file.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_file_index = start_file_index
self._end_file_index = end_file_index
self._initial_offset = offset
self._reader = _reader
self._zip = None
self._entries = None
self._filestream = None
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_keys' parameter with one or more blob keys.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning the subfiles within the blobs.
There will be at least one reader per blob, but it will otherwise
attempt to keep the expanded size even.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = mapper_spec.params
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
blob_files = {}
total_size = 0
for blob_key in blob_keys:
zip_input = zipfile.ZipFile(_reader(blob_key))
blob_files[blob_key] = zip_input.infolist()
total_size += sum(x.file_size for x in blob_files[blob_key])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
# We can break on both blob key and file-within-zip boundaries.
# A shard never spans more than a single blob, but it may handle only
# a few of the files within that blob.
size_per_shard = total_size // shard_count
readers = []
for blob_key in blob_keys:
files = blob_files[blob_key]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in files:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
return readers
def next(self):
"""Returns the next line from this input reader as (lineinfo, line) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple describes the source, it is itself
a tuple (blobkey, filenumber, byteoffset).
The second element of the tuple is the line found at that offset.
"""
if not self._filestream:
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_file_index:
self._end_file_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
value = self._zip.read(entry.filename)
self._filestream = StringIO.StringIO(value)
if self._initial_offset:
self._filestream.seek(self._initial_offset)
self._filestream.readline()
start_position = self._filestream.tell()
line = self._filestream.readline()
if not line:
# Done with this file in the zip. Move on to the next file.
self._filestream.close()
self._filestream = None
self._start_file_index += 1
self._initial_offset = 0
return self.next()
return ((self._blob_key, self._start_file_index, start_position),
line.rstrip("\n"))
def _next_offset(self):
"""Return the offset of the next line to read."""
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()}
@classmethod
def from_json(cls, json, _reader=blobstore.BlobReader):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
_reader: For dependency injection.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_FILE_INDEX_PARAM],
json[cls.END_FILE_INDEX_PARAM],
json[cls.OFFSET_PARAM],
_reader)
def __str__(self):
"""Returns the string representation of this reader.
Returns:
string blobkey:[start file num, end file num]:current offset.
"""
return "blobstore.BlobKey(%r):[%d, %d]:%d" % (
self._blob_key, self._start_file_index, self._end_file_index,
self._next_offset())
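# Illustrative sketch (not part of the library): the _reader hook lets tests
# drive BlobstoreZipLineInputReader from a local zip file instead of the
# Blobstore. "fixture.zip" is a hypothetical test fixture path.
def _example_local_zip_line_reader(start_file_index, end_file_index):
  """Sketch of dependency injection for offline testing."""
  fake_reader = lambda blob_key: open("fixture.zip", "rb")
  return BlobstoreZipLineInputReader("unused-blob-key", start_file_index,
                                     end_file_index, 0, _reader=fake_reader)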
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Status page handler for mapreduce framework."""
import os
import time
from google.appengine.api import validation
from google.appengine.api import yaml_builder
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_listener
from google.appengine.api import yaml_object
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import model
from google.appengine.ext.webapp import template
# TODO(user): a list of features we'd like to have in status page:
# - show sparklet of entities/sec on index page
# - shard bar chart should color finished shards differently
# mapreduce.yaml file names
MR_YAML_NAMES = ["mapreduce.yaml", "mapreduce.yml"]
class Error(Exception):
"""Base class for exceptions in this module."""
class BadStatusParameterError(Exception):
"""A parameter passed to a status handler was invalid."""
class BadYamlError(Error):
"""Raised when the mapreduce.yaml file is invalid."""
class MissingYamlError(BadYamlError):
"""Raised when the mapreduce.yaml file could not be found."""
class MultipleDocumentsInMrYaml(BadYamlError):
"""There's more than one document in mapreduce.yaml file."""
class UserParam(validation.Validated):
"""A user-supplied parameter to a mapreduce job."""
ATTRIBUTES = {
"name": r"[a-zA-Z0-9_\.]+",
"default": validation.Optional(r".*"),
"value": validation.Optional(r".*"),
}
class MapperInfo(validation.Validated):
"""Configuration parameters for the mapper part of the job."""
ATTRIBUTES = {
"handler": r".+",
"input_reader": r".+",
"params": validation.Optional(validation.Repeated(UserParam)),
"params_validator": validation.Optional(r".+"),
}
class MapreduceInfo(validation.Validated):
"""Mapreduce description in mapreduce.yaml."""
ATTRIBUTES = {
"name": r".+",
"mapper": MapperInfo,
"params": validation.Optional(validation.Repeated(UserParam)),
"params_validator": validation.Optional(r".+"),
}
class MapReduceYaml(validation.Validated):
"""Root class for mapreduce.yaml.
File format:
mapreduce:
- name: <mapreduce_name>
mapper:
- input_reader: google.appengine.ext.mapreduce.DatastoreInputReader
- handler: path_to_my.MapperFunction
- params:
- name: foo
default: bar
- name: blah
default: stuff
- params_validator: path_to_my.ValidatorFunction
Where
mapreduce_name: The name of the mapreduce. Used for UI purposes.
mapper_handler_spec: Full <module_name>.<function_name/class_name> of
mapper handler. See MapreduceSpec class documentation for full handler
specification.
input_reader: Full <module_name>.<function_name/class_name> of the
InputReader sub-class to use for the mapper job.
params: A list of optional parameter names and optional default values
that may be supplied or overridden by the user running the job.
params_validator: Full <module_name>.<function_name/class_name> of
a callable to validate the mapper_params after they are input by the
user running the job.
"""
ATTRIBUTES = {
"mapreduce": validation.Optional(validation.Repeated(MapreduceInfo))
}
@staticmethod
def to_dict(mapreduce_yaml):
"""Converts a MapReduceYaml file into a JSON-encodable dictionary.
For use in the user-visible UI and in internal methods for interfacing
with user code (like param validation).
Args:
mapreduce_yaml: The Python representation of the mapreduce.yaml document.
Returns:
A list of configuration dictionaries.
"""
all_configs = []
for config in mapreduce_yaml.mapreduce:
out = {
"name": config.name,
"mapper_input_reader": config.mapper.input_reader,
"mapper_handler": config.mapper.handler,
}
if config.mapper.params_validator:
out["mapper_params_validator"] = config.mapper.params_validator
if config.mapper.params:
param_defaults = {}
for param in config.mapper.params:
param_defaults[param.name] = param.default or param.value
out["mapper_params"] = param_defaults
if config.params:
param_defaults = {}
for param in config.params:
param_defaults[param.name] = param.default or param.value
out["params"] = param_defaults
all_configs.append(out)
return all_configs
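# Illustrative sketch (not part of the library): the shape to_dict produces for
# the sample document in the MapReduceYaml docstring above. All values are the
# hypothetical ones from that sample.
_EXAMPLE_TO_DICT_OUTPUT = [{
    "name": "<mapreduce_name>",
    "mapper_input_reader": "google.appengine.ext.mapreduce.DatastoreInputReader",
    "mapper_handler": "path_to_my.MapperFunction",
    "mapper_params_validator": "path_to_my.ValidatorFunction",
    "mapper_params": {"foo": "bar", "blah": "stuff"},
}]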
# N.B. Sadly, we currently don't have the ability to determine the
# application root dir at run time, so we walk up the directory structure
# to find it.
def find_mapreduce_yaml():
"""Traverse up from current directory and find mapreduce.yaml file.
Returns:
the path of mapreduce.yaml file or None if not found.
"""
dir = os.path.dirname(__file__)
while dir:
for mr_yaml_name in MR_YAML_NAMES:
yaml_path = os.path.join(dir, mr_yaml_name)
if os.path.exists(yaml_path):
return yaml_path
parent = os.path.dirname(dir)
if parent == dir:
break
dir = parent
return None
def parse_mapreduce_yaml(contents):
"""Parses mapreduce.yaml file contents.
Args:
contents: mapreduce.yaml file contents.
Returns:
MapReduceYaml object with all the data from original file.
Raises:
BadYamlError: when contents is not a valid mapreduce.yaml file.
"""
try:
builder = yaml_object.ObjectBuilder(MapReduceYaml)
handler = yaml_builder.BuilderHandler(builder)
listener = yaml_listener.EventListener(handler)
listener.Parse(contents)
mr_info = handler.GetResults()
except (ValueError, yaml_errors.EventError), e:
raise BadYamlError(e)
if len(mr_info) < 1:
raise BadYamlError("No configs found in mapreduce.yaml")
if len(mr_info) > 1:
raise MultipleDocumentsInMrYaml("Found %d YAML documents" % len(mr_info))
jobs = mr_info[0]
job_names = set(j.name for j in jobs.mapreduce)
if len(jobs.mapreduce) != len(job_names):
raise BadYamlError("Overlapping mapreduce names; names must be unique")
return jobs
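# Illustrative sketch (not part of the library): the exception hierarchy in
# action. MultipleDocumentsInMrYaml subclasses BadYamlError, so callers who
# want to distinguish the two cases must catch it first.
def _example_tolerant_parse(contents):
  """Sketch of distinguishing parse failures."""
  try:
    return parse_mapreduce_yaml(contents)
  except MultipleDocumentsInMrYaml:
    raise  # several YAML documents: a config error worth surfacing as-is
  except BadYamlError:
    return None  # missing, empty, or otherwise malformed configuration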
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
"""Locates mapreduce.yaml, loads and parses its info.
Args:
parse: Used for testing.
Returns:
MapReduceYaml object.
Raises:
BadYamlError: when contents is not a valid mapreduce.yaml file or the
file is missing.
"""
mr_yaml_path = find_mapreduce_yaml()
if not mr_yaml_path:
raise MissingYamlError()
mr_yaml_file = open(mr_yaml_path)
try:
return parse(mr_yaml_file.read())
finally:
mr_yaml_file.close()
class ResourceHandler(base_handler.BaseHandler):
"""Handler for static resources."""
_RESOURCE_MAP = {
"status": ("overview.html", "text/html"),
"detail": ("detail.html", "text/html"),
"base.css": ("base.css", "text/css"),
"jquery.js": ("jquery-1.4.2.min.js", "text/javascript"),
"status.js": ("status.js", "text/javascript"),
}
def get(self, relative):
if relative not in self._RESOURCE_MAP:
self.response.set_status(404)
self.response.out.write("Resource not found.")
return
real_path, content_type = self._RESOURCE_MAP[relative]
path = os.path.join(os.path.dirname(__file__), "static", real_path)
self.response.headers["Cache-Control"] = "public; max-age=300"
self.response.headers["Content-Type"] = content_type
self.response.out.write(open(path).read())
class ListConfigsHandler(base_handler.JsonHandler):
"""Lists mapreduce configs as JSON for users to start jobs."""
def handle(self):
self.json_response["configs"] = MapReduceYaml.to_dict(get_mapreduce_yaml())
class ListJobsHandler(base_handler.JsonHandler):
"""Lists running and completed mapreduce jobs for an overview as JSON."""
def handle(self):
cursor = self.request.get("cursor")
count = int(self.request.get("count", "50"))
query = model.MapreduceState.all()
if cursor:
query.filter("__key__ >=", db.Key(cursor))
query.order("__key__")
jobs_list = query.fetch(count + 1)
if len(jobs_list) == (count + 1):
self.json_response["cursor"] = str(jobs_list[-1].key())
jobs_list = jobs_list[:-1]
all_jobs = []
for job in jobs_list:
out = {
# Data shared between overview and detail pages.
"name": job.mapreduce_spec.name,
"mapreduce_id": job.mapreduce_spec.mapreduce_id,
"active": job.active,
"start_timestamp_ms":
int(time.mktime(job.start_time.utctimetuple()) * 1000),
"updated_timestamp_ms":
int(time.mktime(job.last_poll_time.utctimetuple()) * 1000),
# Specific to overview page.
"chart_url": job.sparkline_url,
"active_shards": job.active_shards,
"shards": job.mapreduce_spec.mapper.shard_count,
}
if job.result_status:
out["result_status"] = job.result_status
all_jobs.append(out)
self.json_response["jobs"] = all_jobs
class GetJobDetailHandler(base_handler.JsonHandler):
"""Retrieves the details of a mapreduce job as JSON."""
def handle(self):
mapreduce_id = self.request.get("mapreduce_id")
if not mapreduce_id:
raise BadStatusParameterError("'mapreduce_id' was invalid")
job = model.MapreduceState.get_by_key_name(mapreduce_id)
if job is None:
raise KeyError("Could not find job with ID %r" % mapreduce_id)
self.json_response.update(job.mapreduce_spec.to_json())
self.json_response.update(job.counters_map.to_json())
self.json_response.update({
# Shared with overview page.
"active": job.active,
"start_timestamp_ms":
int(time.mktime(job.start_time.utctimetuple()) * 1000),
"updated_timestamp_ms":
int(time.mktime(job.last_poll_time.utctimetuple()) * 1000),
# Specific to detail page.
"chart_url": job.chart_url,
})
self.json_response["result_status"] = job.result_status
shards_list = model.ShardState.find_by_mapreduce_id(mapreduce_id)
all_shards = []
shards_list.sort(key=lambda x: x.shard_number)
for shard in shards_list:
out = {
"active": shard.active,
"result_status": shard.result_status,
"shard_number": shard.shard_number,
"shard_id": shard.shard_id,
"updated_timestamp_ms":
int(time.mktime(shard.update_time.utctimetuple()) * 1000),
"shard_description": shard.shard_description,
"last_work_item": shard.last_work_item,
}
out.update(shard.counters_map.to_json())
all_shards.append(out)
self.json_response["shards"] = all_shards
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for controlling MapReduce execution outside of MapReduce framework."""
__all__ = ["start_map"]
# pylint: disable-msg=C6409
from mapreduce import handlers
from mapreduce import model
def start_map(name,
handler_spec,
reader_spec,
reader_parameters,
shard_count,
mapreduce_parameters=None,
base_path="/mapreduce",
queue_name="default",
eta=None,
countdown=None,
_app=None):
"""Start a new, mapper-only mapreduce.
Args:
name: mapreduce name. Used only for display purposes.
handler_spec: fully qualified name of mapper handler function/class to call.
reader_spec: fully qualified name of mapper reader to use.
reader_parameters: dictionary of parameters to pass to reader. These are
reader-specific.
shard_count: number of shards to create.
mapreduce_parameters: dictionary of mapreduce parameters relevant to the
whole job.
base_path: base path of mapreduce library handler specified in app.yaml.
"/mapreduce" by default.
queue_name: executor queue name to be used for mapreduce tasks.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
Returns:
mapreduce id as string.
"""
# Avoid a mutable default argument by normalizing the parameter here.
if mapreduce_parameters is None:
  mapreduce_parameters = {}
mapper_spec = model.MapperSpec(handler_spec, reader_spec, reader_parameters,
                               shard_count)
return handlers.StartJobHandler._start_map(
name,
mapper_spec,
mapreduce_parameters,
base_path=base_path,
queue_name=queue_name,
eta=eta,
countdown=countdown,
_app=_app)
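# Illustrative sketch (not part of the library): starting a mapper-only job.
# "myapp.jobs.touch_entry" and "myapp.models.Entry" are hypothetical names
# standing in for a real handler function and datastore kind.
def _example_start():
  """Sketch of a typical start_map call."""
  return start_map(
      name="Touch all entries",
      handler_spec="myapp.jobs.touch_entry",
      reader_spec="mapreduce.input_readers.DatastoreInputReader",
      reader_parameters={"entity_kind": "myapp.models.Entry"},
      shard_count=8)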
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base handler class for all mapreduce handlers.
"""
import logging
from mapreduce.lib import simplejson
from google.appengine.ext import webapp
class BaseHandler(webapp.RequestHandler):
"""Base class for all mapreduce handlers."""
def base_path(self):
"""Base path for all mapreduce-related urls."""
path = self.request.path
return path[:path.rfind("/")]
class JsonHandler(BaseHandler):
"""Base class for JSON handlers for user interface.
Sub-classes should implement the 'handle' method. They should put their
response data in the 'self.json_response' dictionary. Any exceptions raised
by the sub-class implementation will be sent in a JSON response with the
name of the error_class and the error_message.
"""
def __init__(self):
"""Initializer."""
super(JsonHandler, self).__init__()
self.json_response = {}
def get(self):
self.post()
def post(self):
self.json_response.clear()
try:
self.handle()
except Exception, e:
logging.exception("Error in JsonHandler, returning exception.")
# TODO(user): Include full traceback here for the end-user.
self.json_response.clear()
self.json_response["error_class"] = e.__class__.__name__
self.json_response["error_message"] = str(e)
self.response.headers["Content-Type"] = "text/javascript"
try:
output = simplejson.dumps(self.json_response)
except Exception:
logging.exception("Could not serialize to JSON")
self.response.set_status(500, message="Could not serialize to JSON")
return
else:
self.response.out.write(output)
def handle(self):
"""To be implemented by sub-classes."""
raise NotImplementedError()
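# Illustrative sketch (not part of the library): the contract JsonHandler
# expects of sub-classes. handle() only populates json_response; JSON
# serialization and error wrapping are inherited from the base class.
class _ExamplePingHandler(JsonHandler):
  """Hypothetical handler returning a constant payload."""
  def handle(self):
    self.json_response["status"] = "ok"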
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Defines executor tasks handlers for MapReduce implementation."""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
import datetime
import logging
import math
import os
from mapreduce.lib import simplejson
import time
from google.appengine.api import memcache
from google.appengine.api.labs import taskqueue
from google.appengine.ext import db
from mapreduce import base_handler
from mapreduce import context
from mapreduce import quota
from mapreduce import model
from mapreduce import util
# TODO(user): Make this a product of the reader or in quotas.py
_QUOTA_BATCH_SIZE = 20
# The amount of time to perform scanning in one slice. New slice will be
# scheduled as soon as current one takes this long.
_SLICE_DURATION_SEC = 15
# Delay between consecutive controller callback invocations.
_CONTROLLER_PERIOD_SEC = 2
class Error(Exception):
"""Base class for exceptions in this module."""
class NotEnoughArgumentsError(Error):
"""Required argument is missing."""
class NoDataError(Error):
"""There is no data present for a desired input."""
class MapperWorkerCallbackHandler(base_handler.BaseHandler):
"""Callback handler for mapreduce worker task.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
shard_id: id of the shard.
slice_id: id of the slice.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
self._start_time = self._time()
shard_id = self.shard_id()
# TODO(user): Make this prettier
logging.debug("post: shard=%s slice=%s headers=%s",
shard_id, self.slice_id(), self.request.headers)
shard_state, control = db.get([
model.ShardState.get_key_by_shard_id(shard_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not shard_state:
# We're letting this task die. It's up to the controller code to
# reinitialize and restart it.
logging.error("State not found for shard ID %r; shutting down",
shard_id)
return
if control and control.command == model.MapreduceControl.ABORT:
logging.info("Abort command received by shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_ABORTED
shard_state.put()
model.MapreduceControl.abort(spec.mapreduce_id)
return
input_reader = self.input_reader(spec.mapper)
if spec.mapper.params.get("enable_quota", True):
quota_consumer = quota.QuotaConsumer(
quota.QuotaManager(memcache.Client()),
shard_id,
_QUOTA_BATCH_SIZE)
else:
quota_consumer = None
ctx = context.Context(spec, shard_state)
context.Context._set(ctx)
try:
# consume quota ahead, because we do not want to run a datastore
# query if there's not enough quota for the shard.
if not quota_consumer or quota_consumer.check():
scan_aborted = False
entity = None
# We shouldn't fetch an entity from the reader if there's not enough
# quota to process it. Perform all quota checks proactively.
if not quota_consumer or quota_consumer.consume():
for entity in input_reader:
if isinstance(entity, db.Model):
shard_state.last_work_item = repr(entity.key())
else:
shard_state.last_work_item = repr(entity)[:100]
scan_aborted = not self.process_entity(entity, ctx)
# Check if we've got enough quota for the next entity.
if (quota_consumer and not scan_aborted and
not quota_consumer.consume()):
scan_aborted = True
if scan_aborted:
break
else:
scan_aborted = True
if not scan_aborted:
logging.info("Processing done for shard %d of job '%s'",
shard_state.shard_number, shard_state.mapreduce_id)
# We consumed an extra quota item at the end of the for loop.
# Just be nice here and give it back :)
if quota_consumer:
quota_consumer.put(1)
shard_state.active = False
shard_state.result_status = model.ShardState.RESULT_SUCCESS
# TODO(user): Mike said we don't want this to happen in case of an
# exception while scanning. Figure out when it's appropriate to skip.
ctx.flush()
finally:
context.Context._set(None)
if quota_consumer:
quota_consumer.dispose()
# Rescheduling work should always be the last statement. It shouldn't happen
# if there were any exceptions in code before it.
if shard_state.active:
self.reschedule(spec, input_reader)
def process_entity(self, entity, ctx):
"""Process a single entity.
Call mapper handler on the entity.
Args:
entity: an entity to process.
ctx: current execution context.
Returns:
True if scan should be continued, False if scan should be aborted.
"""
ctx.counters.increment(context.COUNTER_MAPPER_CALLS)
handler = ctx.mapreduce_spec.mapper.handler
if util.is_generator_function(handler):
for result in handler(entity):
if callable(result):
result(ctx)
else:
try:
if len(result) == 2:
logging.error("Collectors not implemented yet")
else:
logging.error("Got bad output tuple of length %d", len(result))
except TypeError:
logging.error(
"Handler yielded type %s, expected a callable or a tuple",
result.__class__.__name__)
else:
handler(entity)
if self._time() - self._start_time > _SLICE_DURATION_SEC:
logging.debug("Spent %s seconds. Rescheduling",
self._time() - self._start_time)
return False
return True
def shard_id(self):
"""Get shard unique identifier of this task from request.
Returns:
shard identifier as string.
"""
return str(self.request.get("shard_id"))
def slice_id(self):
"""Get slice unique identifier of this task from request.
Returns:
slice identifier as int.
"""
return int(self.request.get("slice_id"))
def input_reader(self, mapper_spec):
"""Get the reader from mapper_spec initialized with the request's state.
Args:
mapper_spec: a mapper spec containing the immutable mapper state.
Returns:
An initialized InputReader.
"""
input_reader_spec_dict = simplejson.loads(
self.request.get("input_reader_state"))
return mapper_spec.input_reader_class().from_json(
input_reader_spec_dict)
@staticmethod
def worker_parameters(mapreduce_spec,
shard_id,
slice_id,
input_reader):
"""Fill in mapper worker task parameters.
The returned parameter map is to be used as the task payload, and it
contains all the data required by the mapper worker to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
shard_id: id of the shard (part of the whole dataset).
slice_id: id of the slice (part of the shard).
input_reader: InputReader containing the remaining inputs for this
shard.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"shard_id": shard_id,
"slice_id": str(slice_id),
"input_reader_state": input_reader.to_json_str()}
@staticmethod
def get_task_name(shard_id, slice_id):
"""Compute single worker task name.
Args:
shard_id: id of the shard (part of the whole dataset) as string.
slice_id: id of the slice (part of the shard) as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrshard-%s-%s" % (shard_id, slice_id)
def reschedule(self, mapreduce_spec, input_reader):
"""Reschedule worker task to continue scanning work.
Args:
mapreduce_spec: mapreduce specification.
input_reader: remaining input reader to process.
"""
MapperWorkerCallbackHandler.schedule_slice(
self.base_path(), mapreduce_spec, self.shard_id(),
self.slice_id() + 1, input_reader)
@classmethod
def schedule_slice(cls,
base_path,
mapreduce_spec,
shard_id,
slice_id,
input_reader,
queue_name=None,
eta=None,
countdown=None):
"""Schedule slice scanning by adding it to the task queue.
Args:
base_path: base_path of mapreduce request handlers as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
shard_id: current shard id as string.
slice_id: slice id as int.
input_reader: remaining InputReader for given shard.
queue_name: Optional queue to run on; uses the current queue of
execution or the default queue if unspecified.
eta: Absolute time when the MR should execute. May not be specified
if 'countdown' is also supplied. This may be timezone-aware or
timezone-naive.
countdown: Time in seconds into the future that this MR should execute.
Defaults to zero.
"""
task_params = MapperWorkerCallbackHandler.worker_parameters(
mapreduce_spec, shard_id, slice_id, input_reader)
task_name = MapperWorkerCallbackHandler.get_task_name(shard_id, slice_id)
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
queue_name or "default")
try:
taskqueue.Task(url=base_path + "/worker_callback",
params=task_params,
name=task_name,
eta=eta,
countdown=countdown).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
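# Illustrative sketch (not part of the library): the two handler shapes
# process_entity above supports. A plain function is simply called with the
# entity; a generator handler yields callables, each invoked with the current
# context. "entities_seen" is a hypothetical counter name.
def _example_generator_mapper(entity):
  """Hypothetical generator-style mapper handler."""
  def bump(ctx):
    ctx.counters.increment("entities_seen")
  yield bump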
class ControllerCallbackHandler(base_handler.BaseHandler):
"""Supervises mapreduce execution.
It is also responsible for aggregating execution status from the shards.
This task runs "continuously" by re-adding itself to the task queue while
the mapreduce is still active.
"""
def __init__(self, time_function=time.time):
"""Constructor.
Args:
time_function: time function to use to obtain current time.
"""
base_handler.BaseHandler.__init__(self)
self._time = time_function
def post(self):
"""Handle post request."""
spec = model.MapreduceSpec.from_json_str(
self.request.get("mapreduce_spec"))
# TODO(user): Make this logging prettier.
logging.debug("post: id=%s headers=%s",
spec.mapreduce_id, self.request.headers)
state, control = db.get([
model.MapreduceState.get_key_by_job_id(spec.mapreduce_id),
model.MapreduceControl.get_key_by_job_id(spec.mapreduce_id),
])
if not state:
logging.error("State not found for mapreduce_id '%s'; skipping",
spec.mapreduce_id)
return
shard_states = model.ShardState.find_by_mapreduce_id(spec.mapreduce_id)
if state.active and len(shard_states) != spec.mapper.shard_count:
# Some shards were lost
logging.error("Incorrect number of shard states: %d vs %d; "
"aborting job '%s'",
len(shard_states), spec.mapper.shard_count,
spec.mapreduce_id)
state.active = False
state.result_status = model.MapreduceState.RESULT_FAILED
model.MapreduceControl.abort(spec.mapreduce_id)
active_shards = [s for s in shard_states if s.active]
failed_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_FAILED]
aborted_shards = [s for s in shard_states
if s.result_status == model.ShardState.RESULT_ABORTED]
if state.active:
state.active = bool(active_shards)
state.active_shards = len(active_shards)
state.failed_shards = len(failed_shards)
state.aborted_shards = len(aborted_shards)
if (not state.active and control and
control.command == model.MapreduceControl.ABORT):
# User-initiated abort *after* all shards have completed.
logging.info("Abort signal received for job '%s'", spec.mapreduce_id)
state.result_status = model.MapreduceState.RESULT_ABORTED
if not state.active:
state.active_shards = 0
if not state.result_status:
# Set final result status derived from shard states.
if [s for s in shard_states
if s.result_status != model.ShardState.RESULT_SUCCESS]:
state.result_status = model.MapreduceState.RESULT_FAILED
else:
state.result_status = model.MapreduceState.RESULT_SUCCESS
logging.info("Final result for job '%s' is '%s'",
spec.mapreduce_id, state.result_status)
# We don't need a transaction here, since we change only statistics data,
# and we don't care if it gets overwritten/slightly inconsistent.
self.aggregate_state(state, shard_states)
poll_time = state.last_poll_time
state.last_poll_time = datetime.datetime.utcfromtimestamp(self._time())
if not state.active:
# This is the last execution.
# Enqueue done_callback if needed.
def put_state(state):
state.put()
done_callback = spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK)
if done_callback:
taskqueue.Task(
url=done_callback,
headers={"Mapreduce-Id": spec.mapreduce_id}).add(
spec.params.get(
model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE,
"default"),
transactional=True)
db.run_in_transaction(put_state, state)
return
else:
state.put()
processing_rate = int(spec.mapper.params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
self.refill_quotas(poll_time, processing_rate, active_shards)
ControllerCallbackHandler.reschedule(
self.base_path(), spec, self.serial_id() + 1)
def aggregate_state(self, mapreduce_state, shard_states):
"""Update current mapreduce state by aggregating shard states.
Args:
mapreduce_state: current mapreduce state as MapreduceState.
shard_states: all shard states (active and inactive). list of ShardState.
"""
processed_counts = []
mapreduce_state.counters_map.clear()
for shard_state in shard_states:
mapreduce_state.counters_map.add_map(shard_state.counters_map)
processed_counts.append(shard_state.counters_map.get(
context.COUNTER_MAPPER_CALLS))
mapreduce_state.set_processed_counts(processed_counts)
def refill_quotas(self,
last_poll_time,
processing_rate,
active_shard_states):
"""Refill quotas for all active shards.
Args:
last_poll_time: Datetime with the last time the job state was updated.
processing_rate: How many items to process per second overall.
active_shard_states: All active shard states, list of ShardState.
"""
if not active_shard_states:
return
quota_manager = quota.QuotaManager(memcache.Client())
current_time = int(self._time())
last_poll_time = time.mktime(last_poll_time.timetuple())
total_quota_refill = processing_rate * max(0, current_time - last_poll_time)
quota_refill = int(math.ceil(
1.0 * total_quota_refill / len(active_shard_states)))
if not quota_refill:
return
# TODO(user): use batch memcache API to refill quota in one API call.
for shard_state in active_shard_states:
quota_manager.put(shard_state.shard_id, quota_refill)
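# Worked example (illustrative only): with processing_rate=100 items/sec,
# a poll arriving 2 seconds after the previous one, and 8 active shards,
# total_quota_refill = 100 * 2 = 200 and each shard's bucket is topped up
# by ceil(200 / 8) = 25 items.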
def serial_id(self):
"""Get serial unique identifier of this task from request.
Returns:
serial identifier as int.
"""
return int(self.request.get("serial_id"))
@staticmethod
def get_task_name(mapreduce_spec, serial_id):
"""Compute single controller task name.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
task name which should be used to process specified shard/slice.
"""
# Prefix the task name with something unique to this framework's
# namespace so we don't conflict with user tasks on the queue.
return "appengine-mrcontrol-%s-%s" % (
mapreduce_spec.mapreduce_id, serial_id)
@staticmethod
def controller_parameters(mapreduce_spec, serial_id):
"""Fill in controller task parameters.
The returned parameter map is to be used as the task payload; it contains
all the data required by the controller to perform its function.
Args:
mapreduce_spec: specification of the mapreduce.
serial_id: id of the invocation as int.
Returns:
string->string map of parameters to be used as task payload.
"""
return {"mapreduce_spec": mapreduce_spec.to_json_str(),
"serial_id": str(serial_id)}
@classmethod
def reschedule(cls, base_path, mapreduce_spec, serial_id, queue_name=None):
"""Schedule new update status callback task.
Args:
base_path: mapreduce handlers url base path as string.
mapreduce_spec: mapreduce specification as MapreduceSpec.
serial_id: id of the invocation as int.
queue_name: The queue to schedule this task on. Will use the current
queue of execution if not supplied.
"""
task_name = ControllerCallbackHandler.get_task_name(
mapreduce_spec, serial_id)
task_params = ControllerCallbackHandler.controller_parameters(
mapreduce_spec, serial_id)
if not queue_name:
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
try:
taskqueue.Task(url=base_path + "/controller_callback",
name=task_name, params=task_params,
countdown=_CONTROLLER_PERIOD_SEC).add(queue_name)
except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e:
logging.warning("Task %r with params %r already exists. %s: %s",
task_name, task_params, e.__class__, e)
class KickOffJobHandler(base_handler.BaseHandler):
"""Taskqueue handler which kicks off a mapreduce processing.
Request Parameters:
mapreduce_spec: MapreduceSpec of the mapreduce serialized to json.
input_readers: JSON-encoded list of InputReader states, each itself
serialized with to_json_str().
"""
def post(self):
"""Handles kick off request."""
spec = model.MapreduceSpec.from_json_str(
self._get_required_param("mapreduce_spec"))
input_readers_json = simplejson.loads(
self._get_required_param("input_readers"))
queue_name = os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default")
mapper_input_reader_class = spec.mapper.input_reader_class()
input_readers = [mapper_input_reader_class.from_json_str(reader_json)
for reader_json in input_readers_json]
KickOffJobHandler._schedule_shards(
spec, input_readers, queue_name, self.base_path())
ControllerCallbackHandler.reschedule(
self.base_path(), spec, queue_name=queue_name, serial_id=0)
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _schedule_shards(cls, spec, input_readers, queue_name, base_path):
"""Prepares shard states and schedules their execution.
Args:
spec: mapreduce specification as MapreduceSpec.
input_readers: list of InputReaders describing shard splits.
queue_name: The queue to run this job on.
base_path: The base url path of mapreduce callbacks.
"""
# Note: it's safe to re-attempt this handler because:
# - shard state has deterministic and unique key.
# - schedule_slice will fall back gracefully if a task already exists.
shard_states = []
for shard_number, input_reader in enumerate(input_readers):
shard = model.ShardState.create_new(spec.mapreduce_id, shard_number)
shard.shard_description = str(input_reader)
shard_states.append(shard)
# Retrieves any already existing shards.
existing_shard_states = db.get(shard.key() for shard in shard_states)
existing_shard_keys = set(shard.key() for shard in existing_shard_states
if shard is not None)
# Puts only the shards that do not exist yet.
db.put(shard for shard in shard_states
if shard.key() not in existing_shard_keys)
for shard_number, input_reader in enumerate(input_readers):
shard_id = model.ShardState.shard_id_from_number(
spec.mapreduce_id, shard_number)
MapperWorkerCallbackHandler.schedule_slice(
base_path, spec, shard_id, 0, input_reader, queue_name=queue_name)
class StartJobHandler(base_handler.JsonHandler):
"""Command handler starts a mapreduce job."""
def handle(self):
"""Handles start request."""
# Mapper spec as form arguments.
mapreduce_name = self._get_required_param("name")
mapper_input_reader_spec = self._get_required_param("mapper_input_reader")
mapper_handler_spec = self._get_required_param("mapper_handler")
mapper_params = self._get_params(
"mapper_params_validator", "mapper_params.")
params = self._get_params(
"params_validator", "params.")
# Set some mapper param defaults if not present.
mapper_params["processing_rate"] = int(mapper_params.get(
"processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
queue_name = mapper_params["queue_name"] = mapper_params.get(
"queue_name", "default")
# Validate the Mapper spec, handler, and input reader.
mapper_spec = model.MapperSpec(
mapper_handler_spec,
mapper_input_reader_spec,
mapper_params,
int(mapper_params.get("shard_count", model._DEFAULT_SHARD_COUNT)))
mapreduce_id = type(self)._start_map(
mapreduce_name,
mapper_spec,
params,
base_path=self.base_path(),
queue_name=queue_name,
_app=mapper_params.get("_app"))
self.json_response["mapreduce_id"] = mapreduce_id
def _get_params(self, validator_parameter, name_prefix):
"""Retrieves additional user-supplied params for the job and validates them.
Args:
validator_parameter: name of the request parameter which supplies
validator for this parameter set.
name_prefix: common prefix for all parameter names in the request.
Returns:
The user-supplied parameters as a dict, with name_prefix stripped from
the parameter names.
Raises:
Any exception raised by the 'params_validator' request parameter if
the params fail to validate.
"""
params_validator = self.request.get(validator_parameter)
user_params = {}
for key in self.request.arguments():
if key.startswith(name_prefix):
values = self.request.get_all(key)
adjusted_key = key[len(name_prefix):]
if len(values) == 1:
user_params[adjusted_key] = values[0]
else:
user_params[adjusted_key] = values
if params_validator:
resolved_validator = util.for_name(params_validator)
resolved_validator(user_params)
return user_params
def _get_required_param(self, param_name):
"""Get a required request parameter.
Args:
param_name: name of request parameter to fetch.
Returns:
parameter value
Raises:
NotEnoughArgumentsError: if parameter is not specified.
"""
value = self.request.get(param_name)
if not value:
raise NotEnoughArgumentsError(param_name + " not specified")
return value
@classmethod
def _start_map(cls, name, mapper_spec,
mapreduce_params,
base_path="/mapreduce",
queue_name="default",
eta=None,
countdown=None,
_app=None):
# Check that handler can be instantiated.
mapper_spec.get_handler()
mapper_input_reader_class = mapper_spec.input_reader_class()
mapper_input_readers = mapper_input_reader_class.split_input(mapper_spec)
if not mapper_input_readers:
raise NoDataError("Found no mapper input readers to process.")
mapper_spec.shard_count = len(mapper_input_readers)
state = model.MapreduceState.create_new()
mapreduce_spec = model.MapreduceSpec(
name,
state.key().id_or_name(),
mapper_spec.to_json(),
mapreduce_params)
state.mapreduce_spec = mapreduce_spec
state.active = True
state.active_shards = mapper_spec.shard_count
if _app:
state.app_id = _app
# TODO(user): Initialize UI fields correctly.
state.chart_url = ""
state.sparkline_url = ""
def schedule_mapreduce(state, mapper_input_readers, eta, countdown):
state.put()
readers_json = [reader.to_json_str() for reader in mapper_input_readers]
taskqueue.Task(
url=base_path + "/kickoffjob_callback",
params={"mapreduce_spec": state.mapreduce_spec.to_json_str(),
"input_readers": simplejson.dumps(readers_json)},
eta=eta, countdown=countdown).add(queue_name, transactional=True)
# Point of no return: We're actually going to run this job!
db.run_in_transaction(
schedule_mapreduce, state, mapper_input_readers, eta, countdown)
return state.key().id_or_name()
class CleanUpJobHandler(base_handler.JsonHandler):
"""Command to kick off tasks to clean up a job's data."""
def handle(self):
# TODO(user): Have this kick off a task to clean up all MapreduceState,
# ShardState, and MapreduceControl entities for a job ID.
self.json_response["status"] = "This does nothing yet."
class AbortJobHandler(base_handler.JsonHandler):
"""Command to abort a running job."""
def handle(self):
model.MapreduceControl.abort(self.request.get("mapreduce_id"))
self.json_response["status"] = "Abort signal sent."
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model classes which are used to communicate between parts of implementation.
These model classes describe a mapreduce, its current state, and its
communication messages. They are either stored in the datastore or
serialized to/from json and passed around by other means.
"""
# Disable "Invalid method name"
# pylint: disable-msg=C6409
__all__ = ["JsonMixin", "JsonProperty", "MapreduceState", "MapperSpec",
"MapreduceControl", "MapreduceSpec", "ShardState", "CountersMap"]
import copy
import datetime
import logging
import math
import random
from mapreduce.lib import simplejson
import time
import types
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.ext import db
from mapreduce import context
from mapreduce import util
from mapreduce.lib.graphy.backends import google_chart_api
# Default rate of processed entities per second.
_DEFAULT_PROCESSING_RATE_PER_SEC = 100
# Default number of shards to have.
_DEFAULT_SHARD_COUNT = 8
class JsonMixin(object):
"""Simple, stateless json utilities mixin.
Requires class to implement two methods:
to_json(self): convert data to json-compatible datastructure (dict,
list, strings, numbers)
@classmethod from_json(cls, json): load data from json-compatible structure.
"""
def to_json_str(self):
"""Convert data to json string representation.
Returns:
json representation as string.
"""
return simplejson.dumps(self.to_json(), sort_keys=True)
@classmethod
def from_json_str(cls, json_str):
"""Convert json string representation into class instance.
Args:
json_str: json representation as string.
Returns:
New instance of the class with data loaded from json string.
"""
return cls.from_json(simplejson.loads(json_str))
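# A minimal sketch of the JsonMixin contract (illustrative only; the class
# below is hypothetical):
#
#   class Point(JsonMixin):
#     def __init__(self, x, y):
#       self.x, self.y = x, y
#     def to_json(self):
#       return {"x": self.x, "y": self.y}
#     @classmethod
#     def from_json(cls, json):
#       return cls(json["x"], json["y"])
#
#   p = Point.from_json_str(Point(1, 2).to_json_str())  # round-trips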
class JsonProperty(db.UnindexedProperty):
"""Property type for storing json representation of data.
Requires data types to implement two methods:
to_json(self): convert data to json-compatible datastructure (dict,
list, strings, numbers)
@classmethod from_json(cls, json): load data from json-compatible structure.
"""
def __init__(self, data_type, default=None, **kwargs):
"""Constructor.
Args:
data_type: underlying data type as class.
default: default value for the property. The value is deep copied
for each model instance.
kwargs: remaining arguments.
"""
kwargs["default"] = default
super(JsonProperty, self).__init__(**kwargs)
self.data_type = data_type
def get_value_for_datastore(self, model_instance):
"""Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value.
"""
value = super(JsonProperty, self).get_value_for_datastore(model_instance)
if not value:
return None
return datastore_types.Text(simplejson.dumps(
value.to_json(), sort_keys=True))
def make_value_from_datastore(self, value):
"""Convert value from datastore representation.
Args:
value: datastore value.
Returns:
value to store in the model.
"""
if value is None:
return None
return self.data_type.from_json(simplejson.loads(value))
def validate(self, value):
"""Validate value.
Args:
value: model value.
Returns:
Whether the specified value is valid data type value.
Raises:
BadValueError: when value is not of self.data_type type.
"""
if value is not None and not isinstance(value, self.data_type):
raise datastore_errors.BadValueError(
"Property %s must be convertible to a %s instance (%s)" %
(self.name, self.data_type, value))
return super(JsonProperty, self).validate(value)
def empty(self, value):
"""Checks if value is empty.
Args:
value: model value.
Returns:
True if the passed value is empty.
"""
return not value
def default_value(self):
"""Create default model value.
If default option was specified, then it will be deeply copied.
None otherwise.
Returns:
default model value.
"""
if self.default:
return copy.deepcopy(self.default)
else:
return None
# Ridiculous future UNIX epoch time, 500 years from now.
_FUTURE_TIME = 2**34
def _get_descending_key(gettime=time.time, getrandint=random.randint):
"""Returns a key name lexically ordered by time descending.
This lets us have a key name for use with Datastore entities which returns
rows in time descending order when it is scanned in lexically ascending order,
allowing us to bypass index building for descending indexes.
Args:
gettime: Used for testing.
getrandint: Used for testing.
Returns:
A string with a time descending key.
"""
now_descending = int((_FUTURE_TIME - gettime()) * 100)
tie_breaker = getrandint(0, 100)
return "%d%d" % (now_descending, tie_breaker)
class CountersMap(JsonMixin):
"""Maintains map from counter name to counter value.
The class is used to provide basic arithmetic on counter values (bulk
add/remove), to increment individual values, and to store/load data from
json.
"""
def __init__(self, initial_map=None):
"""Constructor.
Args:
initial_map: initial counter values map from counter name (string) to
counter value (int).
"""
if initial_map:
self.counters = initial_map
else:
self.counters = {}
def __repr__(self):
"""Compute string representation."""
return "mapreduce.model.CountersMap(%r)" % self.counters
def get(self, counter_name):
"""Get current counter value.
Args:
counter_name: counter name as string.
Returns:
current counter value as int. 0 if counter was not set.
"""
return self.counters.get(counter_name, 0)
def increment(self, counter_name, delta):
"""Increment counter value.
Args:
counter_name: counter name as String.
delta: increment delta as Integer.
Returns:
new counter value.
"""
current_value = self.counters.get(counter_name, 0)
new_value = current_value + delta
self.counters[counter_name] = new_value
return new_value
def add_map(self, counters_map):
"""Add all counters from the map.
For each counter in the passed map, adds its value to the counter in this
map.
Args:
counters_map: CounterMap instance to add.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, counters_map.counters[counter_name])
def sub_map(self, counters_map):
"""Subtracts all counters from the map.
For each counter in the passed map, subtracts its value from the
corresponding counter in this map.
Args:
counters_map: CounterMap instance to subtract.
"""
for counter_name in counters_map.counters:
self.increment(counter_name, -counters_map.counters[counter_name])
def clear(self):
"""Clear all values."""
self.counters = {}
def to_json(self):
"""Serializes all the data in this map into json form.
Returns:
json-compatible data representation.
"""
return {"counters": self.counters}
@classmethod
def from_json(cls, json):
"""Create new CountersMap from the json data structure, encoded by to_json.
Args:
json: json representation of CountersMap.
Returns:
an instance of CountersMap with all data deserialized from json.
"""
counters_map = cls()
counters_map.counters = json["counters"]
return counters_map
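# Usage sketch (illustrative only):
#
#   total = CountersMap()
#   total.increment("mapper_calls", 10)
#   other = CountersMap({"mapper_calls": 5, "errors": 1})
#   total.add_map(other)
#   total.get("mapper_calls")  # -> 15
#   total.get("missing")       # -> 0 (unset counters default to 0)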
class MapperSpec(JsonMixin):
"""Contains a specification for the mapper phase of the mapreduce.
MapperSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapperSpec is
passed as a payload to all mapreduce tasks in JSON encoding as part of
MapreduceSpec.
Specifying mapper handlers:
* '<module_name>.<class_name>' - __call__ method of class instance will be
called
* '<module_name>.<function_name>' - function will be called.
* '<module_name>.<class_name>.<method_name>' - class will be instantiated
and method called.
"""
def __init__(self, handler_spec, input_reader_spec, params, shard_count):
"""Creates a new MapperSpec.
Args:
handler_spec: handler specification as string (see class doc for
details).
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
shard_count: number of shards to process in parallel.
Properties:
handler_spec: name of handler class/function to use.
shard_count: number of shards to process in parallel.
handler: cached instance of mapper handler as callable.
input_reader_spec: The class name of the input reader to use.
params: Dictionary of additional parameters for the mapper.
"""
self.handler_spec = handler_spec
self.__handler = None
self.input_reader_spec = input_reader_spec
self.shard_count = shard_count
self.params = params
def get_handler(self):
"""Get mapper handler instance.
Returns:
cached handler instance as callable.
"""
if self.__handler is None:
resolved_spec = util.for_name(self.handler_spec)
if isinstance(resolved_spec, type):
# create new instance if this is type
self.__handler = resolved_spec()
elif isinstance(resolved_spec, types.MethodType):
# bind the method
self.__handler = getattr(resolved_spec.im_class(),
resolved_spec.__name__)
else:
self.__handler = resolved_spec
return self.__handler
handler = property(get_handler)
def input_reader_class(self):
"""Get input reader class.
Returns:
input reader class object.
"""
return util.for_name(self.input_reader_spec)
def to_json(self):
"""Serializes this MapperSpec into a json-izable object."""
return {
"mapper_handler_spec": self.handler_spec,
"mapper_input_reader": self.input_reader_spec,
"mapper_params": self.params,
"mapper_shard_count": self.shard_count,
}
@classmethod
def from_json(cls, json):
"""Creates MapperSpec from a dict-like object."""
return cls(json["mapper_handler_spec"],
json["mapper_input_reader"],
json["mapper_params"],
json["mapper_shard_count"])
class MapreduceSpec(JsonMixin):
"""Contains a specification for the whole mapreduce.
MapreduceSpec instance can be changed only during mapreduce starting process,
and it remains immutable for the rest of mapreduce execution. MapreduceSpec is
passed as a payload to all mapreduce tasks in json encoding.
"""
# Url to call when mapreduce finishes its execution.
PARAM_DONE_CALLBACK = "done_callback"
# Queue to use to call done callback
PARAM_DONE_CALLBACK_QUEUE = "done_callback_queue"
def __init__(self,
name,
mapreduce_id,
mapper_spec,
params=None):
"""Create new MapreduceSpec.
Args:
name: The name of this mapreduce job type.
mapreduce_id: ID of the mapreduce.
mapper_spec: JSON-encoded string containing a MapperSpec.
params: dictionary of additional mapreduce parameters.
Properties:
name: The name of this mapreduce job type.
mapreduce_id: unique id of this mapreduce as string.
mapper: This MapreduceSpec's instance of MapperSpec.
params: dictionary of additional mapreduce parameters.
"""
self.name = name
self.mapreduce_id = mapreduce_id
self.mapper = MapperSpec.from_json(mapper_spec)
# "params" defaults to None rather than {} to avoid the shared mutable
# default pitfall; from_json may also pass None when the key is absent.
self.params = params or {}
def to_json(self):
"""Serializes all data in this mapreduce spec into json form.
Returns:
data in json format.
"""
mapper_spec = self.mapper.to_json()
return {
"name": self.name,
"mapreduce_id": self.mapreduce_id,
"mapper_spec": mapper_spec,
"params": self.params,
}
@classmethod
def from_json(cls, json):
"""Create new MapreduceSpec from the json, encoded by to_json.
Args:
json: json representation of MapreduceSpec.
Returns:
an instance of MapreduceSpec with all data deserialized from json.
"""
mapreduce_spec = cls(json["name"],
json["mapreduce_id"],
json["mapper_spec"],
json.get("params"))
return mapreduce_spec
class MapreduceState(db.Model):
"""Holds accumulated state of mapreduce execution.
MapreduceState is stored in datastore with a key name equal to the
mapreduce ID. Only controller tasks can write to MapreduceState.
Properties:
mapreduce_spec: cached deserialized MapreduceSpec instance (read-only).
active: whether this mapreduce is currently running.
last_poll_time: last time the controller job polled this mapreduce.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
chart_url: last computed mapreduce status chart url. The chart displays
per-shard progress counts.
sparkline_url: last computed mapreduce status chart url in small format.
result_status: If not None, the final status of the job.
active_shards: How many shards are still processing.
start_time: When the job started.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
mapreduce_spec = JsonProperty(MapreduceSpec, indexed=False)
active = db.BooleanProperty(default=True, indexed=False)
last_poll_time = db.DateTimeProperty(required=True)
counters_map = JsonProperty(CountersMap, default=CountersMap(), indexed=False)
app_id = db.StringProperty(required=False, indexed=True)
# For UI purposes only.
chart_url = db.TextProperty(default="")
sparkline_url = db.TextProperty(default="")
result_status = db.StringProperty(required=False, choices=_RESULTS)
active_shards = db.IntegerProperty(default=0, indexed=False)
failed_shards = db.IntegerProperty(default=0, indexed=False)
aborted_shards = db.IntegerProperty(default=0, indexed=False)
start_time = db.DateTimeProperty(auto_now_add=True)
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a Job.
Args:
mapreduce_id: The job to retrieve.
Returns:
Datastore Key that can be used to fetch the MapreduceState.
"""
return db.Key.from_path(cls.kind(), mapreduce_id)
def set_processed_counts(self, shards_processed):
"""Updates a chart url to display processed count for each shard.
Args:
shards_processed: list of integers with number of processed entities in
each shard
"""
chart = google_chart_api.BarChart(shards_processed)
if self.mapreduce_spec and shards_processed:
chart.bottom.labels = [
str(x) for x in xrange(self.mapreduce_spec.mapper.shard_count)]
chart.left.labels = ['0', str(max(shards_processed))]
chart.left.min = 0
self.chart_url = chart.display.Url(300, 200)
def get_processed(self):
"""Number of processed entities.
Returns:
The total number of processed entities as int.
"""
return self.counters_map.get(context.COUNTER_MAPPER_CALLS)
processed = property(get_processed)
@staticmethod
def create_new(getkeyname=_get_descending_key,
gettime=datetime.datetime.now):
"""Create a new MapreduceState.
Args:
getkeyname: Used for testing.
gettime: Used for testing.
"""
state = MapreduceState(key_name=getkeyname(),
last_poll_time=gettime())
state.set_processed_counts([])
return state
class ShardState(db.Model):
"""Single shard execution state.
The shard state is stored in the datastore and is later aggregated by
controller task. Shard key_name is equal to shard_id.
Properties:
active: whether this shard is still running, as boolean.
counters_map: shard's counters map as CountersMap. Mirrors
counters_map_json.
mapreduce_id: unique id of the mapreduce.
shard_id: unique id of this shard as string.
shard_number: ordered number for this shard.
result_status: If not None, the final status of this shard.
update_time: The last time this shard state was updated.
shard_description: A string description of the work this shard will do.
last_work_item: A string description of the last work item processed.
"""
RESULT_SUCCESS = "success"
RESULT_FAILED = "failed"
RESULT_ABORTED = "aborted"
_RESULTS = frozenset([RESULT_SUCCESS, RESULT_FAILED, RESULT_ABORTED])
# Functional properties.
active = db.BooleanProperty(default=True, indexed=False)
counters_map = JsonProperty(CountersMap, default=CountersMap(), indexed=False)
result_status = db.StringProperty(choices=_RESULTS, indexed=False)
# For UI purposes only.
mapreduce_id = db.StringProperty(required=True)
update_time = db.DateTimeProperty(auto_now=True, indexed=False)
shard_description = db.TextProperty(default="")
last_work_item = db.TextProperty(default="")
def get_shard_number(self):
"""Gets the shard number from the key name."""
return int(self.key().name().split("-")[-1])
shard_number = property(get_shard_number)
def get_shard_id(self):
"""Returns the shard ID."""
return self.key().name()
shard_id = property(get_shard_id)
@classmethod
def shard_id_from_number(cls, mapreduce_id, shard_number):
"""Get shard id by mapreduce id and shard number.
Args:
mapreduce_id: mapreduce id as string.
shard_number: shard number to compute id for as int.
Returns:
shard id as string.
"""
return "%s-%d" % (mapreduce_id, shard_number)
@classmethod
def get_key_by_shard_id(cls, shard_id):
"""Retrieves the Key for this ShardState.
Args:
shard_id: The shard ID to fetch.
Returns:
The Datastore key to use to retrieve this ShardState.
"""
return db.Key.from_path(cls.kind(), shard_id)
@classmethod
def get_by_shard_id(cls, shard_id):
"""Get shard state from datastore by shard_id.
Args:
shard_id: shard id as string.
Returns:
ShardState for given shard id or None if it's not found.
"""
return cls.get_by_key_name(shard_id)
@classmethod
def find_by_mapreduce_id(cls, mapreduce_id):
"""Find all shard states for given mapreduce.
Args:
mapreduce_id: mapreduce id.
Returns:
iterable of all ShardState for given mapreduce id.
"""
return cls.all().filter("mapreduce_id =", mapreduce_id).fetch(99999)
@classmethod
def create_new(cls, mapreduce_id, shard_number):
"""Create new shard state.
Args:
mapreduce_id: unique mapreduce id as string.
shard_number: shard number for which to create shard state.
Returns:
new instance of ShardState ready to put into datastore.
"""
shard_id = cls.shard_id_from_number(mapreduce_id, shard_number)
state = cls(key_name=shard_id,
mapreduce_id=mapreduce_id)
return state
class MapreduceControl(db.Model):
"""Datastore entity used to control mapreduce job execution.
Only one command may be sent to jobs at a time.
Properties:
command: The command to send to the job.
"""
ABORT = "abort"
_COMMANDS = frozenset([ABORT])
_KEY_NAME = "command"
command = db.TextProperty(choices=_COMMANDS, required=True)
@classmethod
def get_key_by_job_id(cls, mapreduce_id):
"""Retrieves the Key for a mapreduce ID.
Args:
mapreduce_id: The job to fetch.
Returns:
Datastore Key for the command for the given job ID.
"""
return db.Key.from_path(cls.kind(), "%s:%s" % (mapreduce_id, cls._KEY_NAME))
@classmethod
def abort(cls, mapreduce_id):
"""Causes a job to abort.
Args:
mapreduce_id: The job to abort. Not verified as a valid job.
"""
cls(key_name="%s:%s" % (mapreduce_id, cls._KEY_NAME),
command=cls.ABORT).put()
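# Example (illustrative only): MapreduceControl.abort("job123") writes an
# entity with key_name "job123:command" and command "abort". The worker and
# controller handlers look up this same key via get_key_by_job_id to detect
# the abort request.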
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple quota system backed by memcache storage."""
# Memcache namespace to use.
_QUOTA_NAMESPACE = "quota"
# Offset all quota values by this amount since memcache incr/decr
# operate only with unsigned values.
_OFFSET = 2**32
class QuotaManager(object):
"""Simple quota system manager, backed by memcache storage.
Since memcache storage is not reliable, this quota system is not reliable and
best effort only.
Quota is managed by buckets. Each bucket contains a 32-bit int value of
available quota. Buckets should be refilled manually with 'put' method.
It is safe to use a single bucket from multiple clients simultaneously.
"""
def __init__(self, memcache_client):
"""Initialize new instance.
Args:
memcache_client: an instance of memcache client to use.
"""
self.memcache_client = memcache_client
def put(self, bucket, amount):
"""Put amount into quota bucket.
Args:
bucket: quota bucket as string.
amount: amount to be put into the quota bucket as int.
"""
self.memcache_client.incr(bucket, delta=amount,
initial_value=_OFFSET, namespace=_QUOTA_NAMESPACE)
def consume(self, bucket, amount, consume_some=False):
"""Consume amount from quota bucket.
Args:
bucket: quota bucket as string.
amount: amount to consume.
consume_some: specifies behavior in case of not enough quota. If False,
the method will leave quota intact and return 0. If True, will try to
consume as much as possible.
Returns:
Amount of quota consumed.
"""
new_quota = self.memcache_client.decr(
bucket, delta=amount, initial_value=_OFFSET, namespace=_QUOTA_NAMESPACE)
if new_quota >= _OFFSET:
return amount
if consume_some and _OFFSET - new_quota < amount:
# we still can consume some
self.put(bucket, _OFFSET - new_quota)
return amount - (_OFFSET - new_quota)
else:
self.put(bucket, amount)
return 0
def get(self, bucket):
"""Get current bucket amount.
Args:
bucket: quota bucket as string.
Returns:
current bucket amount as int.
"""
amount = self.memcache_client.get(bucket, namespace=_QUOTA_NAMESPACE)
if amount:
return int(amount) - _OFFSET
else:
return 0
def set(self, bucket, amount):
"""Set bucket amount.
Args:
bucket: quota bucket as string.
amount: new bucket amount as int.
"""
self.memcache_client.set(bucket, amount + _OFFSET,
namespace=_QUOTA_NAMESPACE)
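# Usage sketch (illustrative only; assumes google.appengine.api.memcache
# is available in this runtime):
#
#   manager = QuotaManager(memcache.Client())
#   manager.set("bucket", 0)       # stored internally as 0 + 2**32
#   manager.put("bucket", 100)
#   manager.consume("bucket", 30)  # -> 30; 70 units left
#   manager.consume("bucket", 100) # -> 0; not enough, bucket untouched
#   manager.consume("bucket", 100, consume_some=True)  # -> 70; now empty
#   manager.get("bucket")          # -> 0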
class QuotaConsumer(object):
"""Quota consumer wrapper for efficient quota consuming/reclaiming.
Quota is consumed in batches and put back in dispose() method.
WARNING: Always call the dispose() method if you need to keep quota
consistent.
"""
def __init__(self, quota_manager, bucket, batch_size):
"""Initialize new instance.
Args:
quota_manager: quota manager to use for quota operations as QuotaManager.
bucket: quota bucket name as string.
batch_size: batch size for quota consuming as int.
"""
self.quota_manager = quota_manager
self.batch_size = batch_size
self.bucket = bucket
self.quota = 0
def consume(self, amount=1):
"""Consume quota.
Args:
amount: amount of quota to be consumed as int.
Returns:
True if quota was successfully consumed, False if there's not enough
quota.
"""
while self.quota < amount:
delta = self.quota_manager.consume(self.bucket, self.batch_size,
consume_some=True)
if not delta:
return False
self.quota += delta
self.quota -= amount
return True
def put(self, amount=1):
"""Put quota back.
Args:
amount: amount of quota as int.
"""
self.quota += amount
def check(self, amount=1):
"""Check that we have enough quota right now.
This doesn't lock or consume the quota. A consume call that follows might
in fact still fail or succeed.
Args:
amount: amount of quota to check.
Returns:
True if we have enough quota to consume specified amount right now. False
otherwise.
"""
if self.quota >= amount:
return True
return self.quota + self.quota_manager.get(self.bucket) >= amount
def dispose(self):
"""Dispose QuotaConsumer and put all actually unconsumed quota back.
This method has to be called for quota consistency!
"""
self.quota_manager.put(self.bucket, self.quota)
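# Usage sketch (illustrative only): a consumer that draws quota from
# memcache in batches of 10 and returns what it did not use.
#
#   manager = QuotaManager(memcache.Client())
#   manager.put("shard-1", 25)
#   consumer = QuotaConsumer(manager, "shard-1", 10)
#   consumer.consume()     # True; fetches a batch of 10, keeps 9 locally
#   consumer.consume(4)    # True; 5 left locally
#   consumer.dispose()     # puts the unused 5 back into the bucket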
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines input readers for MapReduce."""
# pylint: disable-msg=C6409
import logging
import math
import StringIO
import zipfile
from google.appengine.api import datastore
from mapreduce.lib import blobstore
from google.appengine.ext import db
from mapreduce.lib import key_range
from mapreduce import util
from mapreduce.model import JsonMixin
class Error(Exception):
"""Base-class for exceptions in this module."""
class BadReaderParamsError(Error):
"""The input parameters to a reader were invalid."""
class InputReader(JsonMixin):
"""Abstract base class for input readers.
InputReaders have the following properties:
* They are created by using the split_input method to generate a set of
InputReaders from a MapperSpec.
* They generate inputs to the mapper via the iterator interface.
* After creation, they can be serialized and resumed using the JsonMixin
interface.
* They are cast to string for a user-readable description; it may be
valuable to implement __str__.
"""
# Mapreduce parameters.
_APP_PARAM = "_app"
MAPPER_PARAMS = "mapper_params"
def __iter__(self):
return self
def next(self):
"""Returns the next input from this input reader as a key, value pair.
Returns:
The next input from this input reader.
"""
raise NotImplementedError
@classmethod
def from_json(cls, input_shard_state):
"""Creates an instance of the InputReader for the given input shard state.
Args:
input_shard_state: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
raise NotImplementedError
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
raise NotImplementedError
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader.
Returns:
A list of InputReaders.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
raise NotImplementedError
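# A minimal sketch of the InputReader contract (illustrative only; this
# hypothetical reader yields the integers of a numeric range):
#
#   class RangeInputReader(InputReader):
#     def __init__(self, start, end):
#       self._start, self._end = start, end
#     def next(self):
#       if self._start >= self._end:
#         raise StopIteration()
#       self._start += 1
#       return self._start - 1
#     def to_json(self):
#       return {"start": self._start, "end": self._end}
#     @classmethod
#     def from_json(cls, json):
#       return cls(json["start"], json["end"])
#     @classmethod
#     def split_input(cls, mapper_spec):
#       size = 100 // mapper_spec.shard_count
#       return [cls(i * size, (i + 1) * size)
#               for i in xrange(mapper_spec.shard_count)]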
# TODO(user): Use cursor API as soon as we have it available.
class DatastoreInputReader(InputReader):
"""Represents a range in query results.
DatastoreInputReader yields model instances from the entities in a given key
range. Iterating over DatastoreInputReader changes its range past consumed
entries.
The class shouldn't be instantiated directly. Use the split_input class method
instead.
"""
# Number of entities to fetch at once while doing scanning.
_BATCH_SIZE = 50
# Maximum number of shards we'll create.
_MAX_SHARD_COUNT = 256
# Mapreduce parameters.
ENTITY_KIND_PARAM = "entity_kind"
KEYS_ONLY_PARAM = "keys_only"
BATCH_SIZE_PARAM = "batch_size"
KEY_RANGE_PARAM = "key_range"
# TODO(user): Add support for arbitrary queries. It's not possible to
# support them without cursors since right now you can't even serialize query
# definition.
def __init__(self, entity_kind, key_range_param, mapper_params):
"""Create new DatastoreInputReader object.
This is an internal constructor. Use the split_input class method instead.
Args:
entity_kind: entity kind as string.
key_range_param: key range to process as key_range.KeyRange.
mapper_params: mapper parameters as defined by user.
"""
self._entity_kind = entity_kind
self._key_range = key_range_param
self._mapper_params = mapper_params
self._batch_size = int(self._mapper_params.get(
self.BATCH_SIZE_PARAM, self._BATCH_SIZE))
def __iter__(self):
"""Create a generator for model instances for entities.
Iterating through entities moves query range past the consumed entities.
Yields:
next model instance.
"""
while True:
query = self._key_range.make_ascending_query(
util.for_name(self._entity_kind))
results = query.fetch(limit=self._batch_size)
if not results:
break
for model_instance in results:
key = model_instance.key()
self._key_range.advance(key)
yield model_instance
# TODO(user): use query splitting functionality when it becomes available
# instead.
@classmethod
def _split_input_from_params(cls, app, entity_kind_name,
params, shard_count):
"""Return input reader objects. Helper for split_input."""
raw_entity_kind = util.get_short_name(entity_kind_name)
# we use datastore.Query instead of ext.db.Query here, because we can't
# erase ordering on db.Query once we set it.
ds_query = datastore.Query(kind=raw_entity_kind, _app=app, keys_only=True)
ds_query.Order("__key__")
first_entity_key_list = ds_query.Get(1)
if not first_entity_key_list:
return []
first_entity_key = first_entity_key_list[0]
ds_query.Order(("__key__", datastore.Query.DESCENDING))
try:
last_entity_key, = ds_query.Get(1)
except db.NeedIndexError, e:
# TODO(user): Show this error in the worker log, not the app logs.
logging.warning("Cannot create accurate approximation of keyspace, "
"guessing instead. Please address this problem: %s", e)
# TODO(user): Use a key-end hint from the user input parameters
# in this case, in the event the user has a good way of figuring out
# the range of the keyspace.
last_entity_key = key_range.KeyRange.guess_end_key(raw_entity_kind,
first_entity_key)
full_keyrange = key_range.KeyRange(
first_entity_key, last_entity_key, None, True, True, _app=app)
key_ranges = [full_keyrange]
number_of_half_splits = int(math.floor(math.log(shard_count, 2)))
for _ in range(0, number_of_half_splits):
new_ranges = []
for r in key_ranges:
new_ranges += r.split_range(1)
key_ranges = new_ranges
return [cls(entity_kind_name, r, params) for r in key_ranges]
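# Worked example (illustrative only): for shard_count=8,
# number_of_half_splits = floor(log2(8)) = 3, so the full key range is
# halved three times, yielding up to 8 key ranges (a range that cannot be
# split further is kept whole). A shard_count of 10 also gives 3 halvings,
# i.e. at most 8 readers, fewer than requested, as split_input documents.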
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best as it can to split the whole query result set into equal
shards. Due to difficulty of making the perfect split, resulting shards'
sizes might differ significantly from each other. The actual number of
shards might also be less than requested (even 1), though it is never
greater.
Current implementation does key-lexicographic order splitting. It requires
query not to specify any __key__-based ordering. If an index for
query.order('-__key__') query is not present, an inaccurate guess at
sharding will be made by splitting the full key range.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May also have 'batch_size' in the params to specify the number
of entities to process in each batch.
Returns:
A list of InputReader objects of length <= number_of_shards. These
may be DatastoreInputReader or DatastoreKeyInputReader objects.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = mapper_spec.params
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
shard_count = mapper_spec.shard_count
app = params.get(cls._APP_PARAM)
# keys_only remains for backwards compatibility. It may go away.
keys_only = util.parse_bool(params.get(cls.KEYS_ONLY_PARAM, False))
if keys_only:
raise BadReaderParamsError("The keys_only parameter is obsolete. "
"Use DatastoreKeyInputReader instead.")
# Fail fast if Model cannot be located.
util.for_name(entity_kind_name)
return cls._split_input_from_params(
app, entity_kind_name, params, shard_count)
def to_json(self):
"""Serializes all the data in this query range into json form.
Returns:
all the data in json-compatible map.
"""
json_dict = {self.KEY_RANGE_PARAM: self._key_range.to_json(),
self.ENTITY_KIND_PARAM: self._entity_kind,
self.MAPPER_PARAMS: self._mapper_params}
return json_dict
def __str__(self):
"""Returns the string representation of this DatastoreInputReader."""
return repr(self._key_range)
@classmethod
def from_json(cls, json):
"""Create new DatastoreInputReader from the json, encoded by to_json.
Args:
json: json map representation of DatastoreInputReader.
Returns:
an instance of DatastoreInputReader with all data deserialized from json.
"""
query_range = cls(json[cls.ENTITY_KIND_PARAM],
key_range.KeyRange.from_json(json[cls.KEY_RANGE_PARAM]),
json[cls.MAPPER_PARAMS])
return query_range
class DatastoreKeyInputReader(DatastoreInputReader):
"""An input reader which takes a Kind and yields Keys for that kind."""
def __iter__(self):
"""Create a generator for keys in the range.
Iterating through entries moves query range past the consumed entries.
Yields:
next entry.
"""
while True:
raw_entity_kind = util.get_short_name(self._entity_kind)
query = self._key_range.make_ascending_datastore_query(
raw_entity_kind, keys_only=True)
results = query.Get(limit=self._batch_size)
if not results:
break
for key in results:
self._key_range.advance(key)
yield key
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best as it can to split the whole query result set into equal
shards. Due to difficulty of making the perfect split, resulting shards'
sizes might differ significantly from each other. The actual number of
shards might also be less than requested (even 1), though it is never
greater.
Current implementation does key-lexicographic order splitting. It requires
query not to specify any __key__-based ordering. If an index for
query.order('-__key__') query is not present, an inaccurate guess at
sharding will be made by splitting the full key range.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May also have 'batch_size' in the params to specify the number
of entities to process in each batch.
Returns:
A list of DatastoreKeyInputReader objects of length <= number_of_shards.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = mapper_spec.params
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
shard_count = mapper_spec.shard_count
app = params.get(cls._APP_PARAM)
return cls._split_input_from_params(
app, entity_kind_name, params, shard_count)
class DatastoreEntityInputReader(DatastoreInputReader):
"""An input reader which yields low level datastore entities for a kind."""
def __iter__(self):
"""Create a generator for low level entities in the range.
Iterating through entries moves query range past the consumed entries.
Yields:
next entry.
"""
while True:
raw_entity_kind = util.get_short_name(self._entity_kind)
query = self._key_range.make_ascending_datastore_query(raw_entity_kind)
results = query.Get(limit=self._batch_size)
if not results:
break
for entity in results:
self._key_range.advance(entity.key())
yield entity
@classmethod
def split_input(cls, mapper_spec):
"""Splits query into shards without fetching query results.
Tries as best as it can to split the whole query result set into equal
shards. Due to difficulty of making the perfect split, resulting shards'
sizes might differ significantly from each other. The actual number of
shards might also be less than requested (even 1), though it is never
greater.
Current implementation does key-lexicographic order splitting. It requires
query not to specify any __key__-based ordering. If an index for
query.order('-__key__') query is not present, an inaccurate guess at
sharding will be made by splitting the full key range.
Args:
mapper_spec: MapperSpec with params containing 'entity_kind'.
May also have 'batch_size' in the params to specify the number
of entities to process in each batch.
Returns:
List of DatastoreEntityInputReader objects of length <= number_of_shards.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Input reader class mismatch")
params = mapper_spec.params
if cls.ENTITY_KIND_PARAM not in params:
raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
entity_kind_name = params[cls.ENTITY_KIND_PARAM]
shard_count = mapper_spec.shard_count
app = params.get(cls._APP_PARAM)
return cls._split_input_from_params(
app, entity_kind_name, params, shard_count)
class BlobstoreLineInputReader(InputReader):
"""Input reader for a newline delimited blob in Blobstore."""
# TODO(user): Should we set this based on MAX_BLOB_FETCH_SIZE?
_BLOB_BUFFER_SIZE = 64000
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
# Serialization parameters.
INITIAL_POSITION_PARAM = "initial_position"
END_POSITION_PARAM = "end_position"
BLOB_KEY_PARAM = "blob_key"
def __init__(self, blob_key, start_position, end_position):
"""Initializes this instance with the given blob key and character range.
This BlobstoreLineInputReader will read from the first record starting
strictly after start_position, and will stop before the first record
starting at or after end_position (that record belongs to the following
shard). As an exception, if start_position is 0, then this InputReader
starts reading at the first record.
Args:
blob_key: the BlobKey that this input reader is processing.
start_position: the position to start reading at.
end_position: a position in the last record to read.
"""
self._blob_key = blob_key
self._blob_reader = blobstore.BlobReader(blob_key,
self._BLOB_BUFFER_SIZE,
start_position)
self._end_position = end_position
self._has_iterated = False
self._read_before_start = bool(start_position)
def next(self):
"""Returns the next input from as an (offset, line) tuple."""
self._has_iterated = True
if self._read_before_start:
self._blob_reader.readline()
self._read_before_start = False
start_position = self._blob_reader.tell()
if start_position >= self._end_position:
raise StopIteration()
line = self._blob_reader.readline()
if not line:
raise StopIteration()
return start_position, line.rstrip("\n")
def to_json(self):
"""Returns an json-compatible input shard spec for remaining inputs."""
new_pos = self._blob_reader.tell()
if self._has_iterated:
new_pos -= 1
return {self.BLOB_KEY_PARAM: self._blob_key,
self.INITIAL_POSITION_PARAM: new_pos,
self.END_POSITION_PARAM: self._end_position}
def __str__(self):
"""Returns the string representation of this BlobstoreLineInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._blob_reader.tell(), self._end_position)
@classmethod
def from_json(cls, json):
"""Instantiates an instance of this InputReader for the given shard spec."""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.INITIAL_POSITION_PARAM],
json[cls.END_POSITION_PARAM])
@classmethod
def split_input(cls, mapper_spec):
"""Returns a list of shard_count input_spec_shards for input_spec.
Args:
mapper_spec: The mapper specification to split from. Must contain
'blob_keys' parameter with one or more blob keys.
Returns:
A list of BlobstoreInputReaders corresponding to the specified shards.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = mapper_spec.params
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_keys' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
blob_sizes = {}
for blob_key in blob_keys:
blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
blob_sizes[blob_key] = blob_info.size
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
shards_per_blob = shard_count // len(blob_keys)
if shards_per_blob == 0:
shards_per_blob = 1
chunks = []
for blob_key, blob_size in blob_sizes.items():
blob_chunk_size = blob_size // shards_per_blob
for i in xrange(shards_per_blob - 1):
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
chunks.append(BlobstoreLineInputReader.from_json(
{cls.BLOB_KEY_PARAM: blob_key,
cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
cls.END_POSITION_PARAM: blob_size}))
return chunks
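# Worked example (illustrative only): for two blobs of 1000 bytes each and
# shard_count=4, shards_per_blob = 4 // 2 = 2 and blob_chunk_size = 500,
# so each blob gets readers over byte ranges [0, 500) and [500, 1000).
# The start-offset rule in __init__ guarantees that a line straddling byte
# 500 is processed by exactly one of the two readers.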
class BlobstoreZipInputReader(InputReader):
"""Input reader for files from a zip archive stored in the Blobstore.
Each instance of the reader will read the TOC, from the end of the zip file,
and then only the contained files which it is responsible for.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Mapreduce parameters.
BLOB_KEY_PARAM = "blob_key"
START_INDEX_PARAM = "start_index"
END_INDEX_PARAM = "end_index"
def __init__(self, blob_key, start_index, end_index,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipInputReader will read from the file with index start_index
up to but not including the file with index end_index.
Args:
blob_key: the BlobKey that this input reader is processing.
start_index: the index of the first file to read.
end_index: the index of the first file that will not be read.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_index = start_index
self._end_index = end_index
self._reader = _reader
self._zip = None
self._entries = None
def next(self):
"""Returns the next input from this input reader as (ZipInfo, opener) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple is a zipfile.ZipInfo object.
The second element of the tuple is a zero-argument function that, when
called, returns the complete body of the file.
"""
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_index:self._end_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
self._start_index += 1
return (entry, lambda: self._zip.read(entry.filename))
@classmethod
def from_json(cls, json):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_INDEX_PARAM],
json[cls.END_INDEX_PARAM])
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_INDEX_PARAM: self._start_index,
self.END_INDEX_PARAM: self._end_index}
def __str__(self):
"""Returns the string representation of this BlobstoreZipInputReader."""
return "blobstore.BlobKey(%r):[%d, %d]" % (
self._blob_key, self._start_index, self._end_index)
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input shard states for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_key' parameter with one blob key.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning files within the zip.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = mapper_spec.params
if cls.BLOB_KEY_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
blob_key = params[cls.BLOB_KEY_PARAM]
zip_input = zipfile.ZipFile(_reader(blob_key))
files = zip_input.infolist()
total_size = sum(x.file_size for x in files)
num_shards = min(mapper_spec.shard_count, cls._MAX_SHARD_COUNT)
size_per_shard = total_size // num_shards
# Break the list of files into sublists, each of approximately
# size_per_shard bytes.
shard_start_indexes = [0]
current_shard_size = 0
for i, fileinfo in enumerate(files):
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
shard_start_indexes.append(i + 1)
current_shard_size = 0
if shard_start_indexes[-1] != len(files):
shard_start_indexes.append(len(files))
return [cls(blob_key, start_index, end_index, _reader)
for start_index, end_index
in zip(shard_start_indexes, shard_start_indexes[1:])]
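# Editor's note: an illustrative worked example, not part of the original
# module. It mirrors the greedy loop in split_input with hypothetical file
# sizes of 60, 50 and 10 bytes and two shards: size_per_shard is 60, so the
# boundaries become [0, 1, 3] and the readers cover files [0, 1) and [1, 3).
def _example_zip_shard_boundaries():
  sizes = [60, 50, 10]
  size_per_shard = sum(sizes) // 2
  starts, current = [0], 0
  for i, size in enumerate(sizes):
    current += size
    if current >= size_per_shard:
      starts.append(i + 1)
      current = 0
  if starts[-1] != len(sizes):
    starts.append(len(sizes))
  assert starts == [0, 1, 3]
  return zip(starts, starts[1:])  # [(0, 1), (1, 3)]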
class BlobstoreZipLineInputReader(InputReader):
"""Input reader for newline delimited files in zip archives from Blobstore.
This has the same external interface as the BlobstoreLineInputReader, in that
it takes a list of blobs as its input and yields lines to the reader.
However, the blobs themselves are expected to be zip archives of
line-delimited files rather than the files themselves.
This is useful because line-delimited files often compress very well.
"""
# Maximum number of shards to allow.
_MAX_SHARD_COUNT = 256
# Maximum number of blobs to allow.
_MAX_BLOB_KEYS_COUNT = 246
# Mapreduce parameters.
BLOB_KEYS_PARAM = "blob_keys"
# Serialization parameters.
BLOB_KEY_PARAM = "blob_key"
START_FILE_INDEX_PARAM = "start_file_index"
END_FILE_INDEX_PARAM = "end_file_index"
OFFSET_PARAM = "offset"
def __init__(self, blob_key, start_file_index, end_file_index, offset,
_reader=blobstore.BlobReader):
"""Initializes this instance with the given blob key and file range.
This BlobstoreZipLineInputReader will read from the file with index
start_file_index up to but not including the file with index end_file_index.
It will return lines starting at offset within file[start_file_index].
Args:
blob_key: the BlobKey that this input reader is processing.
start_file_index: the index of the first file to read within the zip.
end_file_index: the index of the first file that will not be read.
offset: the byte offset within blob_key.zip[start_file_index] to start
reading. The reader will continue to the end of the file.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
"""
self._blob_key = blob_key
self._start_file_index = start_file_index
self._end_file_index = end_file_index
self._initial_offset = offset
self._reader = _reader
self._zip = None
self._entries = None
self._filestream = None
@classmethod
def split_input(cls, mapper_spec, _reader=blobstore.BlobReader):
"""Returns a list of input readers for the input spec.
Args:
mapper_spec: The MapperSpec for this InputReader. Must contain
'blob_keys' parameter with one or more blob keys.
_reader: a callable that returns a file-like object for reading blobs.
Used for dependency injection.
Returns:
A list of InputReaders spanning the subfiles within the blobs.
There will be at least one reader per blob, but otherwise the split
attempts to keep the expanded sizes even.
Raises:
BadReaderParamsError: required parameters are missing or invalid.
"""
if mapper_spec.input_reader_class() != cls:
raise BadReaderParamsError("Mapper input reader class mismatch")
params = mapper_spec.params
if cls.BLOB_KEYS_PARAM not in params:
raise BadReaderParamsError("Must specify 'blob_key' for mapper input")
blob_keys = params[cls.BLOB_KEYS_PARAM]
if isinstance(blob_keys, basestring):
# This is a mechanism to allow multiple blob keys (which do not contain
# commas) in a single string. It may go away.
blob_keys = blob_keys.split(",")
if len(blob_keys) > cls._MAX_BLOB_KEYS_COUNT:
raise BadReaderParamsError("Too many 'blob_keys' for mapper input")
if not blob_keys:
raise BadReaderParamsError("No 'blob_keys' specified for mapper input")
blob_files = {}
total_size = 0
for blob_key in blob_keys:
zip_input = zipfile.ZipFile(_reader(blob_key))
blob_files[blob_key] = zip_input.infolist()
total_size += sum(x.file_size for x in blob_files[blob_key])
shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
# We can break on both blob key and file-within-zip boundaries.
# A shard will span at minimum a single blob key, but may only
# handle a few files within a blob.
size_per_shard = total_size // shard_count
readers = []
for blob_key in blob_keys:
files = blob_files[blob_key]
current_shard_size = 0
start_file_index = 0
next_file_index = 0
for fileinfo in files:
next_file_index += 1
current_shard_size += fileinfo.file_size
if current_shard_size >= size_per_shard:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
current_shard_size = 0
start_file_index = next_file_index
if current_shard_size != 0:
readers.append(cls(blob_key, start_file_index, next_file_index, 0,
_reader))
return readers
def next(self):
"""Returns the next line from this input reader as (lineinfo, line) tuple.
Returns:
The next input from this input reader, in the form of a 2-tuple.
The first element of the tuple describes the source, it is itself
a tuple (blobkey, filenumber, byteoffset).
The second element of the tuple is the line found at that offset.
"""
if not self._filestream:
if not self._zip:
self._zip = zipfile.ZipFile(self._reader(self._blob_key))
# Get a list of entries, reversed so we can pop entries off in order
self._entries = self._zip.infolist()[self._start_file_index:
self._end_file_index]
self._entries.reverse()
if not self._entries:
raise StopIteration()
entry = self._entries.pop()
value = self._zip.read(entry.filename)
self._filestream = StringIO.StringIO(value)
if self._initial_offset:
self._filestream.seek(self._initial_offset)
self._filestream.readline()
start_position = self._filestream.tell()
line = self._filestream.readline()
if not line:
# Done with this file in the zip. Move on to the next file.
self._filestream.close()
self._filestream = None
self._start_file_index += 1
self._initial_offset = 0
return self.next()
return ((self._blob_key, self._start_file_index, start_position),
line.rstrip("\n"))
def _next_offset(self):
"""Return the offset of the next line to read."""
if self._filestream:
offset = self._filestream.tell()
if offset:
offset -= 1
else:
offset = self._initial_offset
return offset
def to_json(self):
"""Returns an input shard state for the remaining inputs.
Returns:
A json-izable version of the remaining InputReader.
"""
return {self.BLOB_KEY_PARAM: self._blob_key,
self.START_FILE_INDEX_PARAM: self._start_file_index,
self.END_FILE_INDEX_PARAM: self._end_file_index,
self.OFFSET_PARAM: self._next_offset()}
@classmethod
def from_json(cls, json, _reader=blobstore.BlobReader):
"""Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
_reader: For dependency injection.
Returns:
An instance of the InputReader configured using the values of json.
"""
return cls(json[cls.BLOB_KEY_PARAM],
json[cls.START_FILE_INDEX_PARAM],
json[cls.END_FILE_INDEX_PARAM],
json[cls.OFFSET_PARAM],
_reader)
def __str__(self):
"""Returns the string representation of this reader.
Returns:
string blobkey:[start file num, end file num]:current offset.
"""
return "blobstore.BlobKey(%r):[%d, %d]:%d" % (
self._blob_key, self._start_file_index, self._end_file_index,
self._next_offset())
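# Editor's note: an illustrative sketch, not part of the original module.
# Between slices the framework persists a reader with to_json and restores
# it with from_json; the offset produced by _next_offset above lets the
# restored reader resume at the next unread line.
def _example_zip_line_checkpoint(reader):
  state = reader.to_json()
  return BlobstoreZipLineInputReader.from_json(state)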
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main module for map-reduce implementation.
This module should be specified as a handler for mapreduce URLs in app.yaml:
handlers:
- url: /mapreduce(/.*)?
login: admin
script: mapreduce/main.py
"""
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from mapreduce import handlers
from mapreduce import status
class RedirectHandler(webapp.RequestHandler):
"""Redirects the user back to the status page."""
def get(self):
new_path = self.request.path
if not new_path.endswith('/'):
new_path += '/'
new_path += 'status'
self.redirect(new_path)
def create_application():
"""Create new WSGIApplication and register all handlers.
Returns:
an instance of webapp.WSGIApplication with all mapreduce handlers
registered.
"""
return webapp.WSGIApplication([
# Task queue handlers.
(r".*/worker_callback", handlers.MapperWorkerCallbackHandler),
(r".*/controller_callback", handlers.ControllerCallbackHandler),
(r".*/kickoffjob_callback", handlers.KickOffJobHandler),
# RPC requests with JSON responses
(r".*/command/start_job", handlers.StartJobHandler),
(r".*/command/cleanup_job", handlers.CleanUpJobHandler),
(r".*/command/abort_job", handlers.AbortJobHandler),
(r".*/command/list_configs", status.ListConfigsHandler),
(r".*/command/list_jobs", status.ListJobsHandler),
(r".*/command/get_job_detail", status.GetJobDetailHandler),
# Catch all redirects to status page.
(r"/[^/]+(?:/)?", RedirectHandler),
# UI static files
(r".+/([a-zA-Z0-9]+(?:\.(?:css|js))?)", status.ResourceHandler),
],
debug=True)
APP = create_application()
def main():
util.run_wsgi_app(APP)
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapreduce execution context.
Mapreduce context provides handler code with information about
current mapreduce execution and organizes utility data flow
from handlers such as counters, log messages, mutation pools.
"""
__all__ = ["MAX_ENTITY_COUNT", "MAX_POOL_SIZE", "Context", "MutationPool",
"Counters", "ItemList", "EntityList", "get", "COUNTER_MAPPER_CALLS"]
from google.appengine.api import datastore
from google.appengine.ext import db
# Maximum pool size in bytes. The pool is flushed when it reaches this amount.
# We use 900,000 bytes, which is comfortably below the maximum allowed RPC size
# of 1M, to leave some space cushion.
MAX_POOL_SIZE = 900 * 1000
# Maximum number of items. The pool is flushed when it reaches this amount.
MAX_ENTITY_COUNT = 500
# The name of the counter which counts all mapper calls.
COUNTER_MAPPER_CALLS = "mapper_calls"
def _normalize_entity(value):
"""Return an entity from an entity or model instance."""
# TODO(user): Consider using datastore.NormalizeAndTypeCheck.
if getattr(value, "_populate_internal_entity", None):
return value._populate_internal_entity()
return value
def _normalize_key(value):
"""Return a key from an entity, model instance, key, or key string."""
if getattr(value, "key", None):
return value.key()
elif isinstance(value, basestring):
return datastore.Key(value)
else:
return value
class ItemList(object):
"""Holds list of arbitrary items, and their total size.
Properties:
items: list of objects.
length: length of item list.
size: aggregate item size in bytes.
"""
def __init__(self):
"""Constructor."""
self.items = []
self.length = 0
self.size = 0
def append(self, item, item_size):
"""Add new item to the list.
Args:
item: an item to add to the list.
item_size: item size in bytes as int.
"""
self.items.append(item)
self.length += 1
self.size += item_size
def clear(self):
"""Clear item list."""
self.items = []
self.length = 0
self.size = 0
@property
def entities(self):
"""Return items. For backwards compatability."""
return self.items
# For backwards compatability.
EntityList = ItemList
# TODO(user): mutation pool has no error handling at all. Add some.
class MutationPool(object):
"""Mutation pool accumulates datastore changes to perform them in batch.
Properties:
puts: ItemList of entities to put to datastore.
deletes: ItemList of keys to delete from datastore.
max_pool_size: maximum single list pool size. List changes will be flushed
when this size is reached.
"""
def __init__(self, max_pool_size=MAX_POOL_SIZE):
"""Constructor.
Args:
max_pool_size: maximum pool size in bytes before flushing it to the datastore.
"""
self.max_pool_size = max_pool_size
self.puts = ItemList()
self.deletes = ItemList()
def put(self, entity):
"""Registers entity to put to datastore.
Args:
entity: an entity or model instance to put.
"""
actual_entity = _normalize_entity(entity)
entity_size = len(actual_entity._ToPb().Encode())
if (self.puts.length >= MAX_ENTITY_COUNT or
(self.puts.size + entity_size) > self.max_pool_size):
self.__flush_puts()
self.puts.append(actual_entity, entity_size)
def delete(self, entity):
"""Registers entity to delete from datastore.
Args:
entity: an entity, model instance, or key to delete.
"""
# This is not very nice: we're calling two protected methods here...
key = _normalize_key(entity)
key_size = len(key._ToPb().Encode())
if (self.deletes.length >= MAX_ENTITY_COUNT or
(self.deletes.size + key_size) > self.max_pool_size):
self.__flush_deletes()
self.deletes.append(key, key_size)
# TODO(user): some kind of error handling/retries is needed here.
def flush(self):
"""Flush(apply) all changed to datastore."""
self.__flush_puts()
self.__flush_deletes()
def __flush_puts(self):
"""Flush all puts to datastore."""
datastore.Put(self.puts.items)
self.puts.clear()
def __flush_deletes(self):
"""Flush all deletes to datastore."""
datastore.Delete(self.deletes.items)
self.deletes.clear()
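# Editor's note: an illustrative sketch, not part of the original module.
# Pooled writes accumulate until MAX_ENTITY_COUNT items or max_pool_size
# bytes and are then applied in a single batch RPC; an explicit flush()
# drains whatever is still pending.
def _example_mutation_pool(entities):
  pool = MutationPool()
  for entity in entities:
    pool.put(entity)  # may trigger an automatic batch datastore.Put
  pool.flush()        # apply the remainder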
# This doesn't do much yet. In future it will play nicely with checkpoint/error
# handling system.
class Counters(object):
"""Regulates access to counters."""
def __init__(self, shard_state):
"""Constructor.
Args:
shard_state: current mapreduce shard state as model.ShardState.
"""
self._shard_state = shard_state
def increment(self, counter_name, delta=1):
"""Increment counter value.
Args:
counter_name: name of the counter as string.
delta: increment delta as int.
"""
self._shard_state.counters_map.increment(counter_name, delta)
def flush(self):
"""Flush unsaved counter values."""
pass
class Context(object):
"""MapReduce execution context.
Properties:
mapreduce_spec: current mapreduce specification as model.MapreduceSpec.
shard_state: current shard state as model.ShardState.
mutation_pool: current mutation pool as MutationPool.
counters: counters object as Counters.
"""
# Current context instance
_context_instance = None
def __init__(self, mapreduce_spec, shard_state):
"""Constructor.
Args:
mapreduce_spec: mapreduce specification as model.MapreduceSpec.
shard_state: shard state as model.ShardState.
"""
# TODO(user): Make these properties protected
self.mapreduce_spec = mapreduce_spec
self.shard_state = shard_state
# TODO(user): These properties can stay public.
self.mutation_pool = MutationPool()
self.counters = Counters(shard_state)
self._pools = {}
self.register_pool("mutation_pool", self.mutation_pool)
self.register_pool("counters", self.counters)
def flush(self):
"""Flush all information recorded in context."""
for pool in self._pools.values():
pool.flush()
if self.shard_state:
self.shard_state.put()
# TODO(user): Add convenience method for mapper params.
# TODO(user): Add fatal error logging method here. Will log the message
# and set the shard state to failure result status, which the controller
# callback should pick up and force all shards to terminate.
def register_pool(self, key, pool):
"""Register an arbitrary pool to be flushed together with this context.
Args:
key: pool key as string.
pool: a pool instance. Pool should implement flush(self) method.
"""
self._pools[key] = pool
def get_pool(self, key):
"""Obtains an instance of registered pool.
Args:
key: pool key as string.
Returns:
an instance of the pool registered earlier, or None.
"""
return self._pools.get(key, None)
@classmethod
def _set(cls, context):
"""Set current context instance.
Args:
context: new context as Context or None.
"""
cls._context_instance = context
def get():
"""Get current context instance.
Returns:
current context as Context.
"""
return Context._context_instance
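# Editor's note: an illustrative sketch, not part of the original module.
# Handler code can reach the ambient context installed via Context._set by
# the worker. The counter name is hypothetical.
def _example_handler(entity):
  ctx = get()  # current Context, or None outside a running slice
  if ctx:
    ctx.counters.increment("entities_seen")
    ctx.mutation_pool.put(entity)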
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use with the mapreduce library."""
__all__ = ["for_name", "is_generator_function", "get_short_name", "parse_bool"]
import inspect
import logging
def for_name(fq_name, recursive=False):
"""Find class/function/method specified by its fully qualified name.
A fully qualified name can be specified as:
* <module_name>.<class_name>
* <module_name>.<function_name>
* <module_name>.<class_name>.<method_name> (an unbound method will be
returned in this case).
for_name works by doing __import__ for <module_name>, then looking for
<class_name>/<function_name> in the module's __dict__/attrs. If the fully
qualified name doesn't contain '.', the current module will be used.
Args:
fq_name: fully qualified name of something to find
Returns:
class object.
Raises:
ImportError: when specified module could not be loaded or the class
was not found in the module.
"""
# if "." not in fq_name:
# raise ImportError("'%s' is not a full-qualified name" % fq_name)
fq_name = str(fq_name)
module_name = __name__
short_name = fq_name
if fq_name.rfind(".") >= 0:
(module_name, short_name) = (fq_name[:fq_name.rfind(".")],
fq_name[fq_name.rfind(".") + 1:])
try:
result = __import__(module_name, None, None, [short_name])
return result.__dict__[short_name]
except KeyError:
# If we're recursively inside a for_name() chain, then we want to raise
# this error as a key error so we can report the actual source of the
# problem. If we're *not* recursively being called, that means the
# module was found and the specific item could not be loaded, and thus
# we want to raise an ImportError directly.
if recursive:
raise
else:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError, e:
# module_name is not actually a module. Recursively call for_name on it
# to figure out what it is.
try:
module = for_name(module_name, recursive=True)
if hasattr(module, short_name):
return getattr(module, short_name)
else:
# The module was found, but the function component is missing.
raise KeyError()
except KeyError:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# This means recursive import attempts failed, thus we will raise the
# first ImportError we encountered, since it's likely the most accurate.
pass
# Raise the original import error that caused all of this, since it is
# likely the real cause of the overall problem.
raise
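# Editor's note: an illustrative sketch, not part of the original module.
# Resolving objects by fully qualified dotted name, as the framework does
# for mapper specifications. The names below are stdlib members, chosen only
# because they are guaranteed to resolve.
def _example_for_name():
  deque_class = for_name("collections.deque")  # <module_name>.<class_name>
  join_func = for_name("os.path.join")         # attribute of a submodule
  return deque_class, join_func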
def is_generator_function(obj):
"""Return true if the object is a user-defined generator function.
Generator function objects provide the same attributes as functions;
see isfunction.__doc__ for the attribute listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
True if the object is a generator function.
"""
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR))
def get_short_name(fq_name):
"""Returns the last component of the name."""
return fq_name.split(".")[-1:][0]
def parse_bool(obj):
"""Return true if the object represents a truth value, false otherwise.
For bool and numeric objects, uses Python's built-in bool function. For
str objects, checks string against a list of possible truth values.
Args:
obj: object to determine the boolean value of.
Returns:
Boolean value according to section 5.1 of the Python docs if object is not a str
object. For str objects, return True if str is in TRUTH_VALUE_SET
and False otherwise.
http://docs.python.org/library/stdtypes.html
"""
if type(obj) is str:
TRUTH_VALUE_SET = ["true", "1", "yes", "t", "on"]
return obj.lower() in TRUTH_VALUE_SET
else:
return bool(obj)
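# Editor's note: an illustrative sketch, not part of the original module.
# parse_bool matches the string spellings above case-insensitively and falls
# back to bool() for everything else.
def _example_parse_bool():
  assert parse_bool("Yes") is True
  assert parse_bool("false") is False  # not in the truth-value set
  assert parse_bool(0) is False        # non-str values use plain bool()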
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counters-related operations."""
__all__ = ['Increment']
class Increment(object):
"""Increment counter operation."""
def __init__(self, counter_name, delta=1):
"""Constructor.
Args:
counter_name: name of the counter as string.
delta: increment delta as int.
"""
self.counter_name = counter_name
self.delta = delta
def __call__(self, context):
"""Execute operation.
Args:
context: mapreduce context as context.Context.
"""
context.counters.increment(self.counter_name, self.delta)
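# Editor's note: an illustrative sketch, not part of the original module.
# A mapper yields Increment operations rather than touching counters
# directly; the framework invokes each yielded operation with the current
# context. The counter name is hypothetical.
def _example_mapper(entity):
  yield Increment("entities_processed")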
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Operations which can be yielded from mappers.
An operation is a callable that takes a context.Context as a parameter.
Operations are called during mapper execution, immediately
upon being received from the handler function.
"""
import db
import counters
__all__ = ['db', 'counters']
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DB-related operations."""
__all__ = ['Put', 'Delete']
# TODO(user): handler function annotation which requests to
# use db calls directly without batching them/doing async db calls.
class Put(object):
"""Put entity into datastore via mutation_pool.
See mapreduce.context.MutationPool.
"""
def __init__(self, entity):
"""Constructor.
Args:
entity: an entity to put.
"""
self.entity = entity
def __call__(self, context):
"""Perform operation.
Args:
context: mapreduce context as context.Context.
"""
context.mutation_pool.put(self.entity)
class Delete(object):
"""Delete entity from datastore via mutation_pool.
See mapreduce.context.MutationPool.
"""
def __init__(self, entity):
"""Constructor.
Args:
entity: a key or model instance to delete.
"""
self.entity = entity
def __call__(self, context):
"""Perform operation.
Args:
context: mapreduce context as context.Context.
"""
context.mutation_pool.delete(self.entity)
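# Editor's note: an illustrative sketch, not part of the original module.
# A mapper yields Put and Delete operations, which route through the
# context's mutation pool and are applied in batches. The 'obsolete'
# attribute is hypothetical.
def _example_mapper(entity):
  if getattr(entity, "obsolete", False):
    yield Delete(entity)
  else:
    yield Put(entity)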
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base handler class for all mapreduce handlers.
"""
import logging
from mapreduce.lib import simplejson
from google.appengine.ext import webapp
class BaseHandler(webapp.RequestHandler):
"""Base class for all mapreduce handlers."""
def base_path(self):
"""Base path for all mapreduce-related urls."""
path = self.request.path
return path[:path.rfind("/")]
class JsonHandler(BaseHandler):
"""Base class for JSON handlers for user interface.
Sub-classes should implement the 'handle' method. They should put their
response data in the 'self.json_response' dictionary. Any exceptions raised
by the sub-class implementation will be sent in a JSON response with the
name of the error_class and the error_message.
"""
def __init__(self):
"""Initializer."""
super(JsonHandler, self).__init__()
self.json_response = {}
def get(self):
self.post()
def post(self):
self.json_response.clear()
try:
self.handle()
except Exception, e:
logging.exception("Error in JsonHandler, returning exception.")
# TODO(user): Include full traceback here for the end-user.
self.json_response.clear()
self.json_response["error_class"] = e.__class__.__name__
self.json_response["error_message"] = str(e)
self.response.headers["Content-Type"] = "text/javascript"
try:
output = simplejson.dumps(self.json_response)
except:
logging.exception("Could not serialize to JSON")
self.response.set_status(500, message="Could not serialize to JSON")
return
else:
self.response.out.write(output)
def handle(self):
"""To be implemented by sub-classes."""
raise NotImplementedError()
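# Editor's note: an illustrative sketch, not part of the original module.
# A minimal JsonHandler subclass: handle() only fills self.json_response;
# serialization and error wrapping are done by post() above.
class _ExamplePingHandler(JsonHandler):
  def handle(self):
    self.json_response["status"] = "ok"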
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Key range representation and splitting."""
import os
try:
from mapreduce.lib import simplejson
except ImportError:
simplejson = None
from google.appengine.api import datastore
from google.appengine.datastore import datastore_pb
from google.appengine.ext import db
class Error(Exception):
"""Base class for exceptions in this module."""
class KeyRangeError(Error):
"""Error while trying to generate a KeyRange."""
class SimplejsonUnavailableError(Error):
"""Error while using json functionality whith unavailable simplejson."""
class KeyRange(object):
"""Represents a range of keys in the datastore.
A KeyRange object represents a key range
(key_start, include_start, key_end, include_end)
and a scan direction (KeyRange.DESC or KeyRange.ASC).
"""
DESC = "DESC"
ASC = "ASC"
def __init__(self,
key_start=None,
key_end=None,
direction=None,
include_start=True,
include_end=True,
_app=None):
"""Initialize a KeyRange object.
Args:
key_start: The starting key for this range.
key_end: The ending key for this range.
direction: The direction of the query for this range.
include_start: Whether the start key should be included in the range.
include_end: Whether the end key should be included in the range.
"""
if direction is None:
direction = KeyRange.ASC
assert direction in (KeyRange.ASC, KeyRange.DESC)
self.direction = direction
self.key_start = key_start
self.key_end = key_end
self.include_start = include_start
self.include_end = include_end
self._app = _app
def __str__(self):
if self.include_start:
left_side = "["
else:
left_side = "("
if self.include_end:
right_side = "]"
else:
right_side = "("
return "%s%s%s to %s%s" % (self.direction, left_side, repr(self.key_start),
repr(self.key_end), right_side)
def __repr__(self):
return ("key_range.KeyRange(key_start=%s,key_end=%s,direction=%s,"
"include_start=%s,include_end=%s)") % (repr(self.key_start),
repr(self.key_end),
repr(self.direction),
repr(self.include_start),
repr(self.include_end))
def advance(self, key):
"""Updates the start of the range immediately past the specified key.
Args:
key: A db.Key.
"""
self.include_start = False
self.key_start = key
def filter_query(self, query):
"""Add query filter to restrict to this key range.
Args:
query: A db.Query instance.
Returns:
The input query restricted to this key range.
"""
assert isinstance(query, db.Query)
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query.filter("__key__ %s" % start_comparator, self.key_start)
if self.key_end:
query.filter("__key__ %s" % end_comparator, self.key_end)
return query
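# Editor's note: an illustrative sketch, not part of the original module.
# Restricting an ordinary query to this range; filter_query adds the
# appropriate __key__ comparisons. The model class is hypothetical.
def _example_filter_query(model_class, key_range_obj):
  query = db.Query(model_class)
  return key_range_obj.filter_query(query)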
def filter_datastore_query(self, query):
"""Add query filter to restrict to this key range.
Args:
query: A datastore.Query instance.
Returns:
The input query restricted to this key range.
"""
assert isinstance(query, datastore.Query)
if self._app:
query.__app = self._app
if self.include_start:
start_comparator = ">="
else:
start_comparator = ">"
if self.include_end:
end_comparator = "<="
else:
end_comparator = "<"
if self.key_start:
query.update({"__key__ %s" % start_comparator: self.key_start})
if self.key_end:
query.update({"__key__ %s" % end_comparator: self.key_end})
return query
def __get_direction(self, asc, desc):
"""Check that self.direction is in (KeyRange.ASC, KeyRange.DESC).
Args:
asc: Argument to return if self.direction is KeyRange.ASC
desc: Argument to return if self.direction is KeyRange.DESC
Returns:
asc or desc appropriately
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
if self.direction == KeyRange.ASC:
return asc
elif self.direction == KeyRange.DESC:
return desc
else:
raise KeyRangeError("KeyRange direction unexpected: %s" % self.direction)
def make_directed_query(self, kind_class, keys_only=False):
"""Construct a query for this key range, including the scan direction.
Args:
kind_class: A kind implementation class.
keys_only: bool, default False, use keys_only on Query?
Returns:
A db.Query instance.
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
direction = self.__get_direction("", "-")
query = db.Query(kind_class, keys_only=keys_only)
query.order("%s__key__" % direction)
query = self.filter_query(query)
return query
def make_directed_datastore_query(self, kind, keys_only=False):
"""Construct a query for this key range, including the scan direction.
Args:
kind: A string.
keys_only: bool, default False, use keys_only on Query?
Returns:
A datastore.Query instance.
Raises:
KeyRangeError: if self.direction is not in (KeyRange.ASC, KeyRange.DESC).
"""
direction = self.__get_direction(datastore.Query.ASCENDING,
datastore.Query.DESCENDING)
query = datastore.Query(kind, _app=self._app, keys_only=keys_only)
query.Order(("__key__", direction))
query = self.filter_datastore_query(query)
return query
def make_ascending_query(self, kind_class, keys_only=False):
"""Construct a query for this key range without setting the scan direction.
Args:
kind_class: A kind implementation class.
keys_only: bool, default False, query only for keys.
Returns:
A db.Query instance.
"""
query = db.Query(kind_class, keys_only=keys_only)
query.order("__key__")
query = self.filter_query(query)
return query
def make_ascending_datastore_query(self, kind, keys_only=False):
"""Construct a query for this key range without setting the scan direction.
Args:
kind: A string.
keys_only: bool, default False, use keys_only on Query?
Returns:
A datastore.Query instance.
"""
query = datastore.Query(kind, _app=self._app, keys_only=keys_only)
query.Order(("__key__", datastore.Query.ASCENDING))
query = self.filter_datastore_query(query)
return query
def split_range(self, batch_size=0):
"""Split this key range into a list of at most two ranges.
This method attempts to split the key range approximately in half.
Numeric ranges are split in the middle into two equal ranges and
string ranges are split lexicographically in the middle. If the
key range is smaller than batch_size it is left unsplit.
Note that splitting is done without knowledge of the distribution
of actual entities in the key range, so there is no guarantee (nor
any particular reason to believe) that the entities of the range
are evenly split.
Args:
batch_size: The maximum size of a key range that should not be split.
Returns:
A list of one or two key ranges covering the same space as this range.
"""
key_start = self.key_start
key_end = self.key_end
include_start = self.include_start
include_end = self.include_end
key_pairs = []
if not key_start:
key_pairs.append((key_start, include_start, key_end, include_end,
KeyRange.ASC))
elif not key_end:
key_pairs.append((key_start, include_start, key_end, include_end,
KeyRange.DESC))
else:
key_split = KeyRange.split_keys(key_start, key_end, batch_size)
first_include_end = True
if key_split == key_start:
first_include_end = first_include_end and include_start
key_pairs.append((key_start, include_start,
key_split, first_include_end,
KeyRange.DESC))
second_include_end = include_end
if key_split == key_end:
second_include_end = False
key_pairs.append((key_split, False,
key_end, second_include_end,
KeyRange.ASC))
ranges = [KeyRange(key_start=start,
include_start=include_start,
key_end=end,
include_end=include_end,
direction=direction,
_app=self._app)
for (start, include_start, end, include_end, direction)
in key_pairs]
return ranges
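# Editor's note: an illustrative sketch, not part of the original module.
# One bisection step over a bounded range; the kind name and ids are
# hypothetical, and db.Key.from_path requires an App Engine environment.
def _example_split_range():
  full = KeyRange(key_start=db.Key.from_path("Foo", 1),
                  key_end=db.Key.from_path("Foo", 1000))
  return full.split_range()  # at most two KeyRanges covering the same span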
def __cmp__(self, other):
"""Compare two key ranges.
Key ranges with a value of None for key_start or key_end, are always
considered to have include_start=False or include_end=False, respectively,
when comparing. Since None indicates an unbounded side of the range,
the include specifier is meaningless. The ordering generated is total
but somewhat arbitrary.
Args:
other: An object to compare to this one.
Returns:
-1: if this key range is less than other.
0: if this key range is equal to other.
1: if this key range is greater than other.
"""
if not isinstance(other, KeyRange):
return 1
self_list = [self.key_start, self.key_end, self.direction,
self.include_start, self.include_end]
if not self.key_start:
self_list[3] = False
if not self.key_end:
self_list[4] = False
other_list = [other.key_start,
other.key_end,
other.direction,
other.include_start,
other.include_end]
if not other.key_start:
other_list[3] = False
if not other.key_end:
other_list[4] = False
return cmp(self_list, other_list)
@staticmethod
def bisect_string_range(start, end):
"""Returns a string that is approximately in the middle of the range.
(start, end) is treated as a string range, and it is assumed
start <= end in the usual lexicographic string ordering. The output key
mid is guaranteed to satisfy start <= mid <= end.
The method proceeds by comparing initial characters of start and
end. When the characters are equal, they are appended to the mid
string. In the first place that the characters differ, the
difference characters are averaged and this average is appended to
the mid string. If averaging resulted in rounding down, an
additional character is added to the mid string to make up for the
rounding down. This extra step is necessary for correctness in
the case that the average of the two characters is equal to the
character in the start string.
This method makes the assumption that most keys are ascii and it
attempts to perform splitting within the ascii range when that
results in a valid split.
Args:
start: A string.
end: A string such that start <= end.
Returns:
A string mid such that start <= mid <= end.
"""
if start == end:
return start
start += "\0"
end += "\0"
midpoint = []
expected_max = 127
for i in xrange(min(len(start), len(end))):
if start[i] == end[i]:
midpoint.append(start[i])
else:
ord_sum = ord(start[i]) + ord(end[i])
midpoint.append(unichr(ord_sum / 2))
if ord_sum % 2:
if len(start) > i + 1:
ord_start = ord(start[i+1])
else:
ord_start = 0
if ord_start < expected_max:
ord_split = (expected_max + ord_start) / 2
else:
ord_split = (0xFFFF + ord_start) / 2
midpoint.append(unichr(ord_split))
break
return "".join(midpoint)
@staticmethod
def split_keys(key_start, key_end, batch_size):
"""Return a key that is between key_start and key_end inclusive.
This method compares components of the ancestor paths of key_start
and key_end. The first place in the path that differs is
approximately split in half. If the kind components differ, a new
non-existent kind halfway between the two is used to split the
space. If the id_or_name components differ, then a new id_or_name
that is halfway between the two is selected. If the lower
id_or_name is numeric and the upper id_or_name is a string, then
the minimum string key u'\0' is used as the split id_or_name. The
key that is returned is the shared portion of the ancestor path
followed by the generated split component.
Args:
key_start: A db.Key instance for the lower end of a range.
key_end: A db.Key instance for the upper end of a range.
batch_size: The maximum size of a range that should not be split.
Returns:
A db.Key instance, k, such that key_start <= k <= key_end.
"""
assert key_start.app() == key_end.app()
path1 = key_start.to_path()
path2 = key_end.to_path()
len1 = len(path1)
len2 = len(path2)
assert len1 % 2 == 0
assert len2 % 2 == 0
out_path = []
min_path_len = min(len1, len2) / 2
for i in xrange(min_path_len):
kind1 = path1[2*i]
kind2 = path2[2*i]
if kind1 != kind2:
split_kind = KeyRange.bisect_string_range(kind1, kind2)
out_path.append(split_kind)
out_path.append(unichr(0))
break
last = (len1 == len2 == 2*(i + 1))
id_or_name1 = path1[2*i + 1]
id_or_name2 = path2[2*i + 1]
id_or_name_split = KeyRange._split_id_or_name(
id_or_name1, id_or_name2, batch_size, last)
if id_or_name1 == id_or_name_split:
out_path.append(kind1)
out_path.append(id_or_name1)
else:
out_path.append(kind1)
out_path.append(id_or_name_split)
break
return db.Key.from_path(*out_path, **{"_app": key_start.app()})
@staticmethod
def _split_id_or_name(id_or_name1, id_or_name2, batch_size, maintain_batches):
"""Return an id_or_name that is between id_or_name1 an id_or_name2.
Attempts to split the range [id_or_name1, id_or_name2] in half,
unless maintain_batches is true and the size of the range
[id_or_name1, id_or_name2] is less than or equal to batch_size.
Args:
id_or_name1: A number or string or the id_or_name component of a key
id_or_name2: A number or string or the id_or_name component of a key
batch_size: The range size that will not be split if maintain_batches
is true.
maintain_batches: A boolean for whether to keep small ranges intact.
Returns:
An id_or_name such that id_or_name1 <= id_or_name <= id_or_name2.
"""
if (isinstance(id_or_name1, (int, long)) and
isinstance(id_or_name2, (int, long))):
if not maintain_batches or id_or_name2 - id_or_name1 > batch_size:
return (id_or_name1 + id_or_name2) / 2
else:
return id_or_name1
elif (isinstance(id_or_name1, basestring) and
isinstance(id_or_name2, basestring)):
return KeyRange.bisect_string_range(id_or_name1, id_or_name2)
else:
assert (isinstance(id_or_name1, (int, long)) and
isinstance(id_or_name2, basestring))
return unichr(0)
@staticmethod
def guess_end_key(kind,
key_start,
probe_count=10,
split_rate=5):
"""Guess the end of a key range with a binary search of probe queries.
When the 'key_start' parameter has a key hierarchy, this function will
only determine the key range for keys in a similar hierarchy. That means
if the keys are in the form:
kind=Foo, name=bar/kind=Stuff, name=meep
only this range will be probed:
kind=Foo, name=*/kind=Stuff, name=*
That means other entities of kind 'Stuff' that are children of another
parent entity kind will be skipped:
kind=Other, name=cookie/kind=Stuff, name=meep
Args:
kind: String name of the entity kind.
key_start: The starting key of the search range. In most cases this
should be id = 0 or name = '\0'.
probe_count: Optional, how many probe queries to run.
split_rate: Exponential rate to use for splitting the range on the
way down from the full key space. For smaller ranges this should
be higher so more of the keyspace is skipped on initial descent.
Returns:
A datastore.Key that is guaranteed to be as high as or higher than the
highest key existing for this Kind. A query between 'key_start' and
this returned Key (inclusive) will cover all entities of this Kind.
"""
app = key_start.app()
full_path = key_start.to_path()
for index, piece in enumerate(full_path):
if index % 2 == 0:
continue
elif isinstance(piece, basestring):
full_path[index] = u"\xffff"
else:
full_path[index] = 2**32
key_end = datastore.Key.from_path(*full_path, **{"_app": app})
split_key = key_end
for i in xrange(probe_count):
for j in xrange(split_rate):
split_key = KeyRange.split_keys(key_start, split_key, 1)
results = datastore.Query(
kind,
{"__key__ >": split_key},
_app=app,
keys_only=True).Get(1)
if results:
split_rate = 1
key_start = split_key
split_key = key_end
else:
key_end = split_key
return key_end
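# Explanatory note (not part of the original module) on the probe loop
# above: each of the probe_count iterations halves the candidate range
# split_rate times, then issues one keys-only query for any key above
# split_key. A hit means data still exists above the probe, so the
# lower bound advances to split_key, the probe resets to key_end, and
# halving slows to split_rate = 1; a miss means the probe overshot the
# data, so key_end drops to split_key. The result converges from above
# onto an upper bound for the highest existing key without fetching
# entities.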
def to_json(self):
"""Serialize KeyRange to json.
Returns:
string with KeyRange json representation.
"""
if simplejson is None:
raise SimplejsonUnavailableError(
"JSON functionality requires simplejson to be available")
def key_to_str(key):
if key:
return str(key)
else:
return None
obj_dict = {
"direction": self.direction,
"key_start": key_to_str(self.key_start),
"key_end": key_to_str(self.key_end),
"include_start": self.include_start,
"include_end": self.include_end,
}
if self._app:
obj_dict["_app"] = self._app
return simplejson.dumps(obj_dict, sort_keys=True)
@staticmethod
def from_json(json_str):
"""Deserialize KeyRange from its json representation.
Args:
json_str: string with the json representation created by to_json.
Returns:
deserialized KeyRange instance.
"""
if simplejson is None:
raise SimplejsonUnavailableError(
"JSON functionality requires simplejson to be available")
def key_from_str(key_str):
if key_str:
return db.Key(key_str)
else:
return None
json = simplejson.loads(json_str)
return KeyRange(key_from_str(json["key_start"]),
key_from_str(json["key_end"]),
json["direction"],
json["include_start"],
json["include_end"],
_app=json.get("_app"))
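# Hedged usage sketch (not part of the original module): to_json and
# from_json are symmetric, so a range survives a round trip intact:
#   r = KeyRange(key_start=db.Key.from_path("Foo", 1),
#                key_end=db.Key.from_path("Foo", 100))
#   assert KeyRange.from_json(r.to_json()) == r
# (the assert holds because __cmp__ compares the same fields that are
# serialized)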
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Python |
#!/usr/bin/env python
"""Implementation of JSONEncoder
"""
import re
try:
from mapreduce.lib.simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
except ImportError:
c_encode_basestring_ascii = None
try:
from mapreduce.lib.simplejson._speedups import make_encoder as c_make_encoder
except ImportError:
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
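# Illustrative examples (not part of the original module): the ASCII
# encoder escapes every non-ASCII character, emitting surrogate pairs
# for code points above the Basic Multilingual Plane:
#   py_encode_basestring_ascii(u"snowman \u2603") == '"snowman \\u2603"'
#   py_encode_basestring_ascii(u"\U0001d120") == '"\\ud834\\udd20"'
# whereas encode_basestring escapes only what JSON requires:
#   encode_basestring(u"snowman \u2603") == u'"snowman \u2603"'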
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
To extend this to recognize other objects, subclass and implement a
``.default()`` method that returns a serializable object for ``o``
if possible; otherwise it should call the superclass implementation
(to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
ensure_ascii is false, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif _skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
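# Hedged usage sketch (not part of the original module): JSONEncoder
# drives _make_iterencode (or the C speedup when available) through
# encode() and iterencode():
#   JSONEncoder(sort_keys=True).encode({"b": 1, "a": [True, None]})
#   == '{"a": [true, null], "b": 1}'
#   JSONEncoder(indent=2).encode([1]) == '[\n  1\n]'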
| Python |
#!/usr/bin/env python
r"""A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> compact = simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
>>> # Can't assume dict ordering
>>> compact in ('[1,2,3,{"4":5,"6":7}]', '[1,2,3,{"6":7,"4":5}]')
True
Pretty printing (using repr() because of extraneous whitespace in the output)::
>>> import simplejson
>>> print repr(simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
'{\n "4": 5, \n "6": 7\n}'
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == ["foo", {"bar":["baz", None, 1.0, 2]}]
True
>>> simplejson.loads('"\\"foo\\bar"') == '"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io) == ["streaming API"]
True
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> simplejson.loads('1.1', parse_float=Decimal) == Decimal("1.1")
True
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> simplejson.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(ComplexEncoder().iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.5'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
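# Explanatory note (not part of the original module): the identical
# guard in dump() and dumps() above routes calls that use only default
# arguments through the module-level _default_encoder, so no encoder
# object is built per call; any non-default argument (or extra kwarg)
# falls back to constructing a fresh JSONEncoder. For example,
# dumps({"a": 1}) reuses the cached encoder, while
# dumps({"a": 1}, indent=2) builds a new one.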
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
| Python |
#!/usr/bin/env python
"""Implementation of JSONDecoder
"""
import re
import sys
import struct
from mapreduce.lib.simplejson.scanner import make_scanner
try:
from mapreduce.lib.simplejson._speedups import scanstring as c_scanstring
except ImportError:
c_scanstring = None
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
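# Explanatory note (not part of the original module): the hex blob in
# _floatconstants is two big-endian IEEE-754 doubles, 7FF8... (a quiet
# NaN) followed by 7FF0... (+Infinity); on little-endian machines each
# eight-byte half is reversed before struct.unpack("dd", ...) decodes
# them, and negating the infinity yields NegInf.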
def linecol(doc, pos):
lineno = doc.count('\n', 0, pos) + 1
if lineno == 1:
colno = pos
else:
colno = pos - doc.rindex('\n', 0, pos)
return lineno, colno
def errmsg(msg, doc, pos, end=None):
# Note that this function is called from _speedups
lineno, colno = linecol(doc, pos)
if end is None:
return '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
endlineno, endcolno = linecol(doc, end)
return '%s: line %d column %d - line %d column %d (char %d - %d)' % (
msg, lineno, colno, endlineno, endcolno, pos, end)
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u'"', '\\': u'\\', '/': u'/',
'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
if encoding is None:
encoding = DEFAULT_ENCODING
chunks = []
_append = chunks.append
begin = end - 1
while 1:
chunk = _m(s, end)
if chunk is None:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
end = chunk.end()
content, terminator = chunk.groups()
if content:
if not isinstance(content, unicode):
content = unicode(content, encoding)
_append(content)
if terminator == '"':
break
elif terminator != '\\':
if strict:
raise ValueError(errmsg("Invalid control character %r at", s, end))
else:
_append(terminator)
continue
try:
esc = s[end]
except IndexError:
raise ValueError(
errmsg("Unterminated string starting at", s, begin))
if esc != 'u':
try:
m = _b[esc]
except KeyError:
raise ValueError(
errmsg("Invalid \\escape: %r" % (esc,), s, end))
end += 1
else:
esc = s[end + 1:end + 5]
next_end = end + 5
msg = "Invalid \\uXXXX escape"
try:
if len(esc) != 4:
raise ValueError
uni = int(esc, 16)
if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
if not s[end + 5:end + 7] == '\\u':
raise ValueError
esc2 = s[end + 7:end + 11]
if len(esc2) != 4:
raise ValueError
uni2 = int(esc2, 16)
uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
next_end += 6
m = unichr(uni)
except ValueError:
raise ValueError(errmsg(msg, s, end))
end = next_end
_append(m)
return u''.join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
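# Illustrative examples (not part of the original module): scanstring
# is called with the index just past the opening quote and returns the
# decoded string together with the index just past the closing quote:
#   py_scanstring('"abc"', 1) == (u"abc", 5)
#   py_scanstring('"a\\u00e9"', 1) == (u"a\xe9", 9)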
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
pairs = {}
nextchar = s[end:end + 1]
# Normally we expect nextchar == '"'
if nextchar != '"':
if nextchar in _ws:
end = _w(s, end).end()
nextchar = s[end:end + 1]
# Trivial empty object
if nextchar == '}':
return pairs, end + 1
elif nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end))
end += 1
while True:
key, end = scanstring(s, end, encoding, strict)
# To skip some function call overhead we optimize the fast paths where
# the JSON key separator is ": " or just ":".
if s[end:end + 1] != ':':
end = _w(s, end).end()
if s[end:end + 1] != ':':
raise ValueError(errmsg("Expecting : delimiter", s, end))
end += 1
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
pairs[key] = value
try:
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar == '}':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
try:
nextchar = s[end]
if nextchar in _ws:
end += 1
nextchar = s[end]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end]
except IndexError:
nextchar = ''
end += 1
if nextchar != '"':
raise ValueError(errmsg("Expecting property name", s, end - 1))
if object_hook is not None:
pairs = object_hook(pairs)
return pairs, end
def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError(errmsg("Expecting object", s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ']':
break
elif nextchar != ',':
raise ValueError(errmsg("Expecting , delimiter", s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return values, end
class JSONDecoder(object):
"""Simple JSON <http://json.org> decoder
Performs the following translations in decoding by default:
+---------------+-------------------+
| JSON | Python |
+===============+===================+
| object | dict |
+---------------+-------------------+
| array | list |
+---------------+-------------------+
| string | unicode |
+---------------+-------------------+
| number (int) | int, long |
+---------------+-------------------+
| number (real) | float |
+---------------+-------------------+
| true | True |
+---------------+-------------------+
| false | False |
+---------------+-------------------+
| null | None |
+---------------+-------------------+
It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
their corresponding ``float`` values, which is outside the JSON spec.
"""
def __init__(self, encoding=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, strict=True):
"""``encoding`` determines the encoding used to interpret any ``str``
objects decoded by this instance (utf-8 by default). It has no
effect when decoding ``unicode`` objects.
Note that currently only encodings that are a superset of ASCII work;
strings of other encodings should be passed in as ``unicode``.
``object_hook``, if specified, will be called with the result
of every JSON object decoded and its return value will be used in
place of the given ``dict``. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
"""
self.encoding = encoding
self.object_hook = object_hook
self.parse_float = parse_float or float
self.parse_int = parse_int or int
self.parse_constant = parse_constant or _CONSTANTS.__getitem__
self.strict = strict
self.parse_object = JSONObject
self.parse_array = JSONArray
self.parse_string = scanstring
self.scan_once = make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise ValueError(errmsg("Extra data", s, end, len(s)))
return obj
def raw_decode(self, s, idx=0):
"""Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
with a JSON document) and return a 2-tuple of the Python
representation and the index in ``s`` where the document ended.
This can be used to decode a JSON document from a string that may
have extraneous data at the end.
"""
try:
obj, end = self.scan_once(s, idx)
except StopIteration:
raise ValueError("No JSON object could be decoded")
return obj, end
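# Minimal usage sketch (illustrative, not part of the original module):
# raw_decode tolerates trailing data, while decode() insists the whole
# input is a single JSON document.
if __name__ == '__main__':
    _decoder = JSONDecoder()
    _obj, _end = _decoder.raw_decode('{"a": 1} trailing')
    assert _obj == {'a': 1} and _end == 8
    assert _decoder.decode('[1, 2, 3]') == [1, 2, 3]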
| Python |
#!/usr/bin/env python
"""JSON token scanner
"""
import re
try:
from mapreduce.lib.simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
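# Usage sketch (illustrative): make_scanner closes over a decoder-like
# context object (JSONDecoder passes itself in via make_scanner(self));
# the resulting callable scans one value starting at any index and
# returns (value, end_index). Scanning '[null, 1.5]' from index 0 would
# yield ([None, 1.5], 11).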
| Python |
#!/usr/bin/env python
r"""A simple, fast, extensible JSON encoder and decoder
JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
simplejson exposes an API familiar to users of the standard library
marshal and pickle modules.
Encoding basic Python object hierarchies::
>>> import simplejson
>>> simplejson.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print simplejson.dumps("\"foo\bar")
"\"foo\bar"
>>> print simplejson.dumps(u'\u1234')
"\u1234"
>>> print simplejson.dumps('\\')
"\\"
>>> print simplejson.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> simplejson.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson
>>> compact = simplejson.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
>>> # Can't assume dict ordering
>>> compact in ('[1,2,3,{"4":5,"6":7}]', '[1,2,3,{"6":7,"4":5}]')
True
Pretty printing (using repr() because of extraneous whitespace in the output)::
>>> import simplejson
>>> print repr(simplejson.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
'{\n "4": 5, \n "6": 7\n}'
Decoding JSON::
>>> import simplejson
>>> simplejson.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == ["foo", {"bar":["baz", None, 1.0, 2]}]
True
>>> simplejson.loads('"\\"foo\\bar"') == '"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> simplejson.load(io) == ["streaming API"]
True
Specializing JSON object decoding::
>>> import simplejson
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> simplejson.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> simplejson.loads('1.1', parse_float=Decimal) == Decimal("1.1")
True
Extending JSONEncoder::
>>> import simplejson
>>> class ComplexEncoder(simplejson.JSONEncoder):
... def default(self, obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... return simplejson.JSONEncoder.default(self, obj)
...
>>> simplejson.dumps(2 + 1j, cls=ComplexEncoder)
'[2.0, 1.0]'
>>> ComplexEncoder().encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(ComplexEncoder().iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson from the shell to validate and
pretty-print::
$ echo '{"json":"obj"}' | python -msimplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -msimplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.5'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
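# Usage note (illustrative): the branch above reuses the module-level
# _default_encoder whenever every argument keeps its default value, so
# repeated dump(obj, fp) calls skip re-building a JSONEncoder; passing
# any non-default option constructs a fresh encoder instead:
#   >>> from StringIO import StringIO
#   >>> buf = StringIO()
#   >>> dump({'a': 1}, buf, indent=4)   # non-default: fresh encoder
#   >>> buf.getvalue()
#   '{\n    "a": 1\n}'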
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is ``True`` then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is ``False``, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is ``False``, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is ``False``, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance with the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (skipkeys is False and ensure_ascii is True and
check_circular is True and allow_nan is True and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` are encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed; such a stream should be wrapped with
``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed; such strings should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
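# Usage sketch (illustrative): the fast path above is taken only when all
# arguments are left at their defaults; any hook routes through a fresh
# JSONDecoder instance:
#   >>> loads('[1, 2]')                   # fast path via _default_decoder
#   [1, 2]
#   >>> loads('[1, 2]', parse_int=float)  # custom decoder instance
#   [1.0, 2.0]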
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to line charts."""
import copy
import warnings
from mapreduce.lib.graphy import common
class LineStyle(object):
"""Represents the style for a line on a line chart. Also provides some
convenient presets.
Object attributes (Passed directly to the Google Chart API. Check there for
details):
width: Width of the line
on: Length of a line segment (for dashed/dotted lines)
off: Length of a break (for dashed/dotted lines)
color: Color of the line. A hex string, like 'ff0000' for red. Optional,
AutoColor will fill this in for you automatically if empty.
Some common styles, such as LineStyle.dashed, are available:
solid
dashed
dotted
thick_solid
thick_dashed
thick_dotted
"""
# Widths
THIN = 1
THICK = 2
# Patterns
# ((on, off) tuples, as passed to LineChart.AddLine)
SOLID = (1, 0)
DASHED = (8, 4)
DOTTED = (2, 4)
def __init__(self, width, on, off, color=None):
"""Construct a LineStyle. See class docstring for details on args."""
self.width = width
self.on = on
self.off = off
self.color = color
LineStyle.solid = LineStyle(1, 1, 0)
LineStyle.dashed = LineStyle(1, 8, 4)
LineStyle.dotted = LineStyle(1, 2, 4)
LineStyle.thick_solid = LineStyle(2, 1, 0)
LineStyle.thick_dashed = LineStyle(2, 8, 4)
LineStyle.thick_dotted = LineStyle(2, 2, 4)
class LineChart(common.BaseChart):
"""Represents a line chart."""
def __init__(self, points=None):
super(LineChart, self).__init__()
if points is not None:
self.AddLine(points)
def AddLine(self, points, label=None, color=None,
pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None):
"""Add a new line to the chart.
This is a convenience method which constructs the DataSeries and appends it
for you. It returns the new series.
points: List of equally-spaced y-values for the line
label: Name of the line (used for the legend)
color: Hex string, like 'ff0000' for red
pattern: Tuple of (length of segment, length of gap), e.g.
LineStyle.DASHED
width: Width of the line (e.g. LineStyle.THIN)
markers: List of Marker objects to attach to this line (see DataSeries
for more info)
"""
if color is not None and isinstance(color[0], common.Marker):
warnings.warn('Your code may be broken! '
'You passed a list of Markers instead of a color. The '
'old argument order (markers before color) is deprecated.',
DeprecationWarning, stacklevel=2)
style = LineStyle(width, pattern[0], pattern[1], color=color)
series = common.DataSeries(points, label=label, style=style,
markers=markers)
self.data.append(series)
return series
def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None,
label=None):
"""DEPRECATED"""
warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. ',
DeprecationWarning, stacklevel=2)
return self.AddLine(points, color=color, width=style.width,
pattern=(style.on, style.off), markers=markers,
label=label)
class Sparkline(LineChart):
"""Represent a sparkline. These behave like LineCharts,
mostly, but come without axes.
"""
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for pie charts."""
import warnings
from mapreduce.lib.graphy import common
from mapreduce.lib.graphy import util
class Segment(common.DataSeries):
"""A single segment of the pie chart.
Object attributes:
size: relative size of the segment
label: label of the segment (if any)
color: color of the segment (if any)
"""
def __init__(self, size, label=None, color=None):
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! '
'Label looks like a hex triplet; it might be a color. '
'The old argument order (color before label) is '
'deprecated.',
DeprecationWarning, stacklevel=2)
style = common._BasicStyle(color)
super(Segment, self).__init__([size], label=label, style=style)
assert size >= 0
def _GetSize(self):
return self.data[0]
def _SetSize(self, value):
assert value >= 0
self.data[0] = value
size = property(_GetSize, _SetSize,
doc = """The relative size of this pie segment.""")
# Since Segments are so simple, provide color for convenience.
def _GetColor(self):
return self.style.color
def _SetColor(self, color):
self.style.color = color
color = property(_GetColor, _SetColor,
doc = """The color of this pie segment.""")
class PieChart(common.BaseChart):
"""Represents a pie chart.
The pie chart consists of a single "pie" by default, but additional pies
may be added using the AddPie method. The Google Chart API will display
the pies as concentric circles, with pie #0 on the inside; other backends
may display the pies differently.
"""
def __init__(self, points=None, labels=None, colors=None):
"""Constructor for PieChart objects.
Creates a pie chart with a single pie.
Args:
points: A list of data points for the pie chart;
i.e., relative sizes of the pie segments
labels: A list of labels for the pie segments.
TODO: Allow the user to pass in None as one of
the labels in order to skip that label.
colors: A list of colors for the pie segments, as hex strings
(e.g. '0000ff' for blue). If there are fewer colors than pie
segments, the Google Chart API will attempt to produce a smooth
color transition between segments by spreading the colors across
them.
"""
super(PieChart, self).__init__()
self.formatters = []
self._colors = None
if points:
self.AddPie(points, labels, colors)
def AddPie(self, points, labels=None, colors=None):
"""Add a whole pie to the chart.
Args:
points: A list of pie segment sizes
labels: A list of labels for the pie segments
colors: A list of colors for the segments. Missing colors will be chosen
automatically.
Returns:
The index of the newly added pie.
"""
num_colors = len(colors or [])
num_labels = len(labels or [])
pie_index = len(self.data)
self.data.append([])
for i, pt in enumerate(points):
label = None
if i < num_labels:
label = labels[i]
color = None
if i < num_colors:
color = colors[i]
self.AddSegment(pt, label=label, color=color, pie_index=pie_index)
return pie_index
def AddSegments(self, points, labels, colors):
"""DEPRECATED."""
warnings.warn('PieChart.AddSegments is deprecated. Call AddPie instead. ',
DeprecationWarning, stacklevel=2)
num_colors = len(colors or [])
for i, pt in enumerate(points):
assert pt >= 0
label = labels[i]
color = None
if i < num_colors:
color = colors[i]
self.AddSegment(pt, label=label, color=color)
def AddSegment(self, size, label=None, color=None, pie_index=0):
"""Add a pie segment to this chart, and return the segment.
size: The size of the segment.
label: The label for the segment.
color: The color of the segment, or None to automatically choose the color.
pie_index: The index of the pie that will receive the new segment.
By default, the chart has one pie (pie #0); use the AddPie method to
add more pies.
"""
if isinstance(size, Segment):
warnings.warn("AddSegment(segment) is deprecated. Use AddSegment(size, "
"label, color) instead", DeprecationWarning, stacklevel=2)
segment = size
else:
segment = Segment(size, label=label, color=color)
assert segment.size >= 0
if pie_index == 0 and not self.data:
# Create the default pie
self.data.append([])
assert (pie_index >= 0 and pie_index < len(self.data))
self.data[pie_index].append(segment)
return segment
def AddSeries(self, points, color=None, style=None, markers=None, label=None):
"""DEPRECATED
Add a new segment to the chart and return it.
The segment must contain exactly one data point; all parameters
other than color and label are ignored.
"""
warnings.warn('PieChart.AddSeries is deprecated. Call AddSegment or '
'AddSegments instead.', DeprecationWarning)
return self.AddSegment(Segment(points[0], color=color, label=label))
def SetColors(self, *colors):
"""Change the colors of this chart to the specified list of colors.
Note that this will completely override the individual colors specified
in the pie segments. Missing colors will be interpolated, so that the
list of colors covers all segments in all the pies.
"""
self._colors = colors
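# Usage sketch (illustrative):
if __name__ == '__main__':
    chart = PieChart([10, 30, 60], labels=['a', 'b', 'c'])
    chart.AddSegment(5, label='d', color='00ff00')  # added to pie #0
    second_pie = chart.AddPie([1, 2])               # returns index 1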
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to bar charts."""
import copy
import warnings
from mapreduce.lib.graphy import common
from mapreduce.lib.graphy import util
class BarsStyle(object):
"""Style of a series of bars in a BarChart
Object Attributes:
color: Hex string, like '00ff00' for green
"""
def __init__(self, color):
self.color = color
class BarChartStyle(object):
"""Represents the style for bars on a BarChart.
Any of the object attributes may be set to None, in which case the
value will be auto-calculated.
Object Attributes:
bar_thickness: The thickness of a bar, in pixels.
bar_gap: The gap between bars, in pixels, or as a fraction of bar thickness
if use_fractional_gap_spacing is True.
group_gap: The gap between groups of bars, in pixels, or as a fraction of
bar thickness if use_fractional_gap_spacing is True.
use_fractional_gap_spacing: if True, bar_gap and group_gap specify gap
sizes as a fraction of bar width. Default is False.
"""
_DEFAULT_GROUP_GAP = 8
_DEFAULT_BAR_GAP = 4
def __init__(self, bar_thickness=None,
bar_gap=_DEFAULT_BAR_GAP, group_gap=_DEFAULT_GROUP_GAP,
use_fractional_gap_spacing=False):
"""Create a new BarChartStyle.
Args:
bar_thickness: The thickness of a bar, in pixels. Set this to None if
you want the bar thickness to be auto-calculated (this is the default
behaviour).
bar_gap: The gap between bars, in pixels. Default is 4.
group_gap: The gap between groups of bars, in pixels. Default is 8.
use_fractional_gap_spacing: If True, treat bar_gap and group_gap as
fractions of bar_thickness instead of pixels. Default is False.
"""
self.bar_thickness = bar_thickness
self.bar_gap = bar_gap
self.group_gap = group_gap
self.use_fractional_gap_spacing = use_fractional_gap_spacing
class BarStyle(BarChartStyle):
def __init__(self, *args, **kwargs):
warnings.warn('BarStyle is deprecated. Use BarChartStyle.',
DeprecationWarning, stacklevel=2)
super(BarStyle, self).__init__(*args, **kwargs)
class BarChart(common.BaseChart):
"""Represents a bar chart.
Object attributes:
vertical: if True, the bars will be vertical. Default is True.
stacked: if True, the bars will be stacked. Default is False.
style: The BarChartStyle for all bars on this chart, specifying bar
thickness and gaps between bars.
"""
def __init__(self, points=None):
"""Constructor for BarChart objects."""
super(BarChart, self).__init__()
if points is not None:
self.AddBars(points)
self.vertical = True
self.stacked = False
self.style = BarChartStyle(None, None, None) # full auto
def AddBars(self, points, label=None, color=None):
"""Add a series of bars to the chart.
points: List of y-values for the bars in this series
label: Name of the series (used in the legend)
color: Hex string, like '00ff00' for green
This is a convenience method which constructs & appends the DataSeries for
you.
"""
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! '
'Label is a hex triplet. Maybe it is a color? The '
'old argument order (color before label) is deprecated.',
DeprecationWarning, stacklevel=2)
style = BarsStyle(color)
series = common.DataSeries(points, label=label, style=style)
self.data.append(series)
return series
def GetDependentAxes(self):
"""Get the dependendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
else:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
def GetIndependentAxes(self):
"""Get the independendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
else:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
def GetDependentAxis(self):
"""Get the main dependendant axis, which depends on orientation."""
if self.vertical:
return self.left
else:
return self.bottom
def GetIndependentAxis(self):
"""Get the main independendant axis, which depends on orientation."""
if self.vertical:
return self.bottom
else:
return self.left
def GetMinMaxValues(self):
"""Get the largest & smallest bar values as (min_value, max_value)."""
if not self.stacked:
return super(BarChart, self).GetMinMaxValues()
if not self.data:
return None, None # No data, nothing to do.
num_bars = max(len(series.data) for series in self.data)
positives = [0 for i in xrange(0, num_bars)]
negatives = list(positives)
for series in self.data:
for i, point in enumerate(series.data):
if point:
if point > 0:
positives[i] += point
else:
negatives[i] += point
min_value = min(min(positives), min(negatives))
max_value = max(max(positives), max(negatives))
return min_value, max_value
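# Worked example (illustrative): with stacked bars, per-bar sums drive the
# range; series [1, 2, 3] and [4, 5, 6] stack to [5, 7, 9], so the max is
# 9 (the zero baseline of the empty negative sums still bounds the min).
if __name__ == '__main__':
    chart = BarChart()
    chart.AddBars([1, 2, 3], label='lower')
    chart.AddBars([4, 5, 6], label='upper')
    chart.stacked = True
    assert chart.GetMinMaxValues() == (0, 9)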
| Python |
#!/usr/bin/env python
__version__ = '1.0'
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various formatters which can help format a chart
object. To use these, add them to your chart's list of formatters. For
example:
chart.formatters.append(InlineLegend)
chart.formatters.append(LabelSeparator(right=8))
Feel free to write your own formatter. Formatters are just callables that
modify the chart in some (hopefully useful) way. For example, the AutoColor
formatter makes sure each DataSeries has a color applied to it. The formatter
should take the chart to format as its only argument.
(The formatters work on a deepcopy of the user's chart, so modifications
shouldn't leak back into the user's original chart)
"""
def AutoLegend(chart):
"""Automatically fill out the legend based on series labels. This will only
fill out the legend if there is at least one series with a label.
"""
chart._show_legend = False
labels = []
for series in chart.data:
if series.label is None:
labels.append('')
else:
labels.append(series.label)
chart._show_legend = True
if chart._show_legend:
chart._legend_labels = labels
class AutoColor(object):
"""Automatically add colors to any series without colors.
Object attributes:
colors: The list of colors (hex strings) to cycle through. You can modify
this list if you don't like the default colors.
"""
def __init__(self):
# TODO: Add a few more default colors.
# TODO: Add a default styles too, so if you don't specify color or
# style, you get a unique set of colors & styles for your data.
self.colors = ['0000ff', 'ff0000', '00dd00', '000000']
def __call__(self, chart):
index = -1
for series in chart.data:
if series.style.color is None:
index += 1
if index >= len(self.colors):
index = 0
series.style.color = self.colors[index]
class AutoScale(object):
"""If you don't set min/max on the dependent axes, this fills them in
automatically by calculating min/max dynamically from the data.
You can set just min or just max and this formatter will fill in the other
value for you automatically. For example, if you only set min then this will
set max automatically, but leave min untouched.
Charts can have multiple dependent axes (chart.left & chart.right, for
example.) If you set min/max on some axes but not others, then this formatter
copies your min/max to the un-set axes. For example, if you set up min/max on
only the right axis then your values will be automatically copied to the left
axis. (if you use different min/max values for different axes, the
precedence is undefined. So don't do that.)
"""
def __init__(self, buffer=0.05):
"""Create a new AutoScale formatter.
Args:
buffer: percentage of extra space to allocate around the chart's axes.
"""
self.buffer = buffer
def __call__(self, chart):
"""Format the chart by setting the min/max values on its dependent axis."""
if not chart.data:
return # Nothing to do.
min_value, max_value = chart.GetMinMaxValues()
if None in (min_value, max_value):
return # No data. Nothing to do.
# Honor user's choice, if they've picked min/max.
for axis in chart.GetDependentAxes():
if axis.min is not None:
min_value = axis.min
if axis.max is not None:
max_value = axis.max
buffer = (max_value - min_value) * self.buffer # Stay away from edge.
for axis in chart.GetDependentAxes():
if axis.min is None:
axis.min = min_value - buffer
if axis.max is None:
axis.max = max_value + buffer
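# Worked example (illustrative): for data spanning 0..10 with the default
# buffer of 0.05, the padding is (10 - 0) * 0.05 = 0.5, so unset axes get
# min = -0.5 and max = 10.5.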
class LabelSeparator(object):
"""Adjust the label positions to avoid having them overlap. This happens for
any axis with minimum_label_spacing set.
"""
def __init__(self, left=None, right=None, bottom=None):
self.left = left
self.right = right
self.bottom = bottom
def __call__(self, chart):
self.AdjustLabels(chart.left, self.left)
self.AdjustLabels(chart.right, self.right)
self.AdjustLabels(chart.bottom, self.bottom)
def AdjustLabels(self, axis, minimum_label_spacing):
if minimum_label_spacing is None:
return
if len(axis.labels) <= 1: # Nothing to adjust
return
if axis.max is not None and axis.min is not None:
# Find the spacing required to fit all labels evenly.
# Don't try to push them farther apart than that.
maximum_possible_spacing = (axis.max - axis.min) / (len(axis.labels) - 1)
if minimum_label_spacing > maximum_possible_spacing:
minimum_label_spacing = maximum_possible_spacing
labels = [list(x) for x in zip(axis.label_positions, axis.labels)]
labels = sorted(labels, reverse=True)
# First pass from the top, moving colliding labels downward
for i in range(1, len(labels)):
if labels[i - 1][0] - labels[i][0] < minimum_label_spacing:
new_position = labels[i - 1][0] - minimum_label_spacing
if axis.min is not None and new_position < axis.min:
new_position = axis.min
labels[i][0] = new_position
# Second pass from the bottom, moving colliding labels upward
for i in range(len(labels) - 2, -1, -1):
if labels[i][0] - labels[i + 1][0] < minimum_label_spacing:
new_position = labels[i + 1][0] + minimum_label_spacing
if axis.max is not None and new_position > axis.max:
new_position = axis.max
labels[i][0] = new_position
# Separate positions and labels
label_positions, labels = zip(*labels)
axis.labels = labels
axis.label_positions = label_positions
def InlineLegend(chart):
"""Provide a legend for line charts by attaching labels to the right
end of each line. Suppresses the regular legend.
"""
show = False
labels = []
label_positions = []
for series in chart.data:
if series.label is None:
labels.append('')
else:
labels.append(series.label)
show = True
label_positions.append(series.data[-1])
if show:
chart.right.min = chart.left.min
chart.right.max = chart.left.max
chart.right.labels = labels
chart.right.label_positions = label_positions
chart._show_legend = False # Suppress the regular legend.
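# Example custom formatter (illustrative, hypothetical helper): formatters
# are plain callables that take the copied chart as their only argument.
def _ThickenLines(chart):
    """Hypothetical formatter: widen any series style that has a width."""
    for series in chart.data:
        if getattr(series.style, 'width', None) is not None:
            series.style.width += 1
# It would be enabled with chart.formatters.append(_ThickenLines).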
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code common to all chart types."""
import copy
import warnings
from mapreduce.lib.graphy import formatters
from mapreduce.lib.graphy import util
class Marker(object):
"""Represents an abstract marker, without position. You can attach these to
a DataSeries.
Object attributes:
shape: One of the shape codes (Marker.arrow, Marker.diamond, etc.)
color: color (as hex string, f.ex. '0000ff' for blue)
size: size of the marker
"""
# TODO: Write an example using markers.
# Shapes:
arrow = 'a'
cross = 'c'
diamond = 'd'
circle = 'o'
square = 's'
x = 'x'
# Note: The Google Chart API also knows some other markers ('v', 'V', 'r',
# 'b') that I think would fit better into a grid API.
# TODO: Make such a grid API
def __init__(self, shape, color, size):
"""Construct a Marker. See class docstring for details on args."""
# TODO: Shapes 'r' and 'b' would be much easier to use if they had a
# special-purpose API (instead of trying to fake it with markers)
self.shape = shape
self.color = color
self.size = size
class _BasicStyle(object):
"""Basic style object. Used internally."""
def __init__(self, color):
self.color = color
class DataSeries(object):
"""Represents one data series for a chart (both data & presentation
information).
Object attributes:
points: List of numbers representing y-values (x-values are not specified
because the Google Chart API expects even x-value spacing).
label: String with the series' label in the legend. The chart will only
have a legend if at least one series has a label. If some series
do not have a label then they will have an empty description in
the legend. This is currently a limitation in the Google Chart
API.
style: A chart-type-specific style object. (LineStyle for LineChart,
BarsStyle for BarChart, etc.)
markers: List of (x, m) tuples where m is a Marker object and x is the
x-axis value to place it at.
The "fill" markers ('r' & 'b') are a little weird because they
aren't a point on a line. For these, you can fake it by
passing slightly weird data (I'd like a better API for them at
some point):
For 'b', you attach the marker to the starting series, and set x
to the index of the ending line. Size is ignored, I think.
For 'r', you can attach to any line, specify the starting
y-value for x and the ending y-value for size. Y, in this case,
is between 0.0 (bottom) and 1.0 (top).
color: DEPRECATED
"""
# TODO: Should we require the points list to be non-empty ?
# TODO: Do markers belong here? They are really only used for LineCharts
def __init__(self, points, label=None, style=None, markers=None, color=None):
"""Construct a DataSeries. See class docstring for details on args."""
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! Label is a hex triplet. Maybe '
'it is a color? The old argument order (color & style '
'before label) is deprecated.', DeprecationWarning,
stacklevel=2)
if color is not None:
warnings.warn('Passing color is deprecated. Pass a style object '
'instead.', DeprecationWarning, stacklevel=2)
# Attempt to fix it for them. If they also passed a style, honor it.
if style is None:
style = _BasicStyle(color)
if style is not None and isinstance(style, basestring):
warnings.warn('Your code is broken! Style is a string, not an object. '
'Maybe you are passing a color? Passing color is '
'deprecated; pass a style object instead.',
DeprecationWarning, stacklevel=2)
if style is None:
style = _BasicStyle(None)
self.data = points
self.style = style
self.markers = markers or []
self.label = label
def _GetColor(self):
warnings.warn('DataSeries.color is deprecated, use '
'DataSeries.style.color instead.', DeprecationWarning,
stacklevel=2)
return self.style.color
def _SetColor(self, color):
warnings.warn('DataSeries.color is deprecated, use '
'DataSeries.style.color instead.', DeprecationWarning,
stacklevel=2)
self.style.color = color
color = property(_GetColor, _SetColor)
class AxisPosition(object):
"""Represents all the available axis positions.
The available positions are as follows:
AxisPosition.TOP
AxisPosition.BOTTOM
AxisPosition.LEFT
AxisPosition.RIGHT
"""
LEFT = 'y'
RIGHT = 'r'
BOTTOM = 'x'
TOP = 't'
class Axis(object):
"""Represents one axis.
Object settings:
min: Minimum value for the bottom or left end of the axis
max: Max value.
labels: List of labels to show along the axis.
label_positions: List of positions to show the labels at. Uses the scale
set by min & max, so if you set min = 0 and max = 10, then
label positions [0, 5, 10] would be at the bottom,
middle, and top of the axis, respectively.
grid_spacing: Amount of space between gridlines (in min/max scale).
A value of 0 disables gridlines.
label_gridlines: If True, draw a line extending from each label
on the axis all the way across the chart.
"""
def __init__(self, axis_min=None, axis_max=None):
"""Construct a new Axis.
Args:
axis_min: smallest value on the axis
axis_max: largest value on the axis
"""
self.min = axis_min
self.max = axis_max
self.labels = []
self.label_positions = []
self.grid_spacing = 0
self.label_gridlines = False
# TODO: Add other chart types. Order of preference:
# - scatter plots
# - us/world maps
class BaseChart(object):
"""Base chart object with standard behavior for all other charts.
Object attributes:
data: List of DataSeries objects. Chart subtypes provide convenience
functions (like AddLine, AddBars, AddSegment) to add more series
later.
left/right/bottom/top: Axis objects for the 4 different axes.
formatters: A list of callables which will be used to format this chart for
display. TODO: Need better documentation for how these
work.
auto_scale, auto_color, auto_legend:
These aliases let users access the default formatters without poking
around in self.formatters. If the user removes them from
self.formatters then they will no longer be enabled, even though they'll
still be accessible through the aliases. Similarly, re-assigning the
aliases has no effect on the contents of self.formatters.
display: This variable is reserved for backends to populate with a display
object. The intention is that the display object would be used to
render this chart. The details of what gets put here depends on
the specific backend you are using.
"""
# Canonical ordering of position keys
_POSITION_CODES = 'yrxt'
# TODO: Add more inline args to __init__ (esp. labels).
# TODO: Support multiple series in the constructor, if given.
def __init__(self):
"""Construct a BaseChart object."""
self.data = []
self._axes = {}
for code in self._POSITION_CODES:
self._axes[code] = [Axis()]
self._legend_labels = [] # AutoLegend fills this out
self._show_legend = False # AutoLegend fills this out
# Aliases for default formatters
self.auto_color = formatters.AutoColor()
self.auto_scale = formatters.AutoScale()
self.auto_legend = formatters.AutoLegend
self.formatters = [self.auto_color, self.auto_scale, self.auto_legend]
# display is used to convert the chart into something displayable (like a
# url or img tag).
self.display = None
def AddFormatter(self, formatter):
"""Add a new formatter to the chart (convenience method)."""
self.formatters.append(formatter)
def AddSeries(self, points, color=None, style=None, markers=None,
label=None):
"""DEPRECATED
Add a new series of data to the chart; return the DataSeries object."""
warnings.warn('AddSeries is deprecated. Instead, call AddLine for '
'LineCharts, AddBars for BarCharts, AddSegment for '
'PieCharts ', DeprecationWarning, stacklevel=2)
series = DataSeries(points, color=color, style=style, markers=markers,
label=label)
self.data.append(series)
return series
def GetDependentAxes(self):
"""Return any dependent axes ('left' and 'right' by default for LineCharts,
although bar charts would use 'bottom' and 'top').
"""
return self._axes[AxisPosition.LEFT] + self._axes[AxisPosition.RIGHT]
def GetIndependentAxes(self):
"""Return any independent axes (normally top & bottom, although horizontal
bar charts use left & right by default).
"""
return self._axes[AxisPosition.TOP] + self._axes[AxisPosition.BOTTOM]
def GetDependentAxis(self):
"""Return this chart's main dependent axis (often 'left', but
horizontal bar-charts use 'bottom').
"""
return self.left
def GetIndependentAxis(self):
"""Return this chart's main independent axis (often 'bottom', but
horizontal bar-charts use 'left').
"""
return self.bottom
def _Clone(self):
"""Make a deep copy this chart.
Formatters & display will be missing from the copy, due to limitations in
deepcopy.
"""
orig_values = {}
# Things which deepcopy will likely choke on if it tries to copy.
uncopyables = ['formatters', 'display', 'auto_color', 'auto_scale',
'auto_legend']
for name in uncopyables:
orig_values[name] = getattr(self, name)
setattr(self, name, None)
clone = copy.deepcopy(self)
for name, orig_value in orig_values.iteritems():
setattr(self, name, orig_value)
return clone
def GetFormattedChart(self):
"""Get a copy of the chart with formatting applied."""
# Formatters need to mutate the chart, but we don't want to change it out
# from under the user. So, we work on a copy of the chart.
scratchpad = self._Clone()
for formatter in self.formatters:
formatter(scratchpad)
return scratchpad
def GetMinMaxValues(self):
"""Get the largest & smallest values in this chart, returned as
(min_value, max_value). Takes into account complications like stacked data
series.
For example, with non-stacked series, a chart with [1, 2, 3] and [4, 5, 6]
would return (1, 6). If the same chart was stacking the data series, it
would return (5, 9).
"""
MinPoint = lambda data: min(x for x in data if x is not None)
MaxPoint = lambda data: max(x for x in data if x is not None)
mins = [MinPoint(series.data) for series in self.data if series.data]
maxes = [MaxPoint(series.data) for series in self.data if series.data]
if not mins or not maxes:
return None, None # No data, just bail.
return min(mins), max(maxes)
def AddAxis(self, position, axis):
"""Add an axis to this chart in the given position.
Args:
position: an AxisPosition object specifying the axis's position
axis: The axis to add, an Axis object
Returns:
the value of the axis parameter
"""
self._axes.setdefault(position, []).append(axis)
return axis
def GetAxis(self, position):
"""Get or create the first available axis in the given position.
This is a helper method for the left, right, top, and bottom properties.
If the specified axis does not exist, it will be created.
Args:
position: the position to search for
Returns:
The first axis in the given position
"""
# Not using setdefault here just in case, to avoid calling the Axis()
# constructor needlessly
if position in self._axes:
return self._axes[position][0]
else:
axis = Axis()
self._axes[position] = [axis]
return axis
def SetAxis(self, position, axis):
"""Set the first axis in the given position to the given value.
This is a helper method for the left, right, top, and bottom properties.
Args:
position: an AxisPosition object specifying the axis's position
axis: The axis to set, an Axis object
Returns:
the value of the axis parameter
"""
self._axes.setdefault(position, [None])[0] = axis
return axis
def _GetAxes(self):
"""Return a generator of (position_code, Axis) tuples for this chart's axes.
The axes will be sorted by position using the canonical ordering sequence,
_POSITION_CODES.
"""
for code in self._POSITION_CODES:
for axis in self._axes.get(code, []):
yield (code, axis)
def _GetBottom(self):
return self.GetAxis(AxisPosition.BOTTOM)
def _SetBottom(self, value):
self.SetAxis(AxisPosition.BOTTOM, value)
bottom = property(_GetBottom, _SetBottom,
doc="""Get or set the bottom axis""")
def _GetLeft(self):
return self.GetAxis(AxisPosition.LEFT)
def _SetLeft(self, value):
self.SetAxis(AxisPosition.LEFT, value)
left = property(_GetLeft, _SetLeft,
doc="""Get or set the left axis""")
def _GetRight(self):
return self.GetAxis(AxisPosition.RIGHT)
def _SetRight(self, value):
self.SetAxis(AxisPosition.RIGHT, value)
right = property(_GetRight, _SetRight,
doc="""Get or set the right axis""")
def _GetTop(self):
return self.GetAxis(AxisPosition.TOP)
def _SetTop(self, value):
self.SetAxis(AxisPosition.TOP, value)
top = property(_GetTop, _SetTop,
doc="""Get or set the top axis""")
| Python |
#!/usr/bin/env python
def _IsColor(color):
"""Try to determine if color is a hex color string.
Labels that look like hex colors will match too, unfortunately."""
if not isinstance(color, basestring):
return False
color = color.strip('#')
if len(color) != 3 and len(color) != 6:
return False
hex_letters = '0123456789abcdefABCDEF'
for letter in color:
if letter not in hex_letters:
return False
return True
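# Illustration of the heuristic above (example inputs, not from the original
# module):
#   _IsColor('#ff0000') -> True   (6 hex digits after stripping '#')
#   _IsColor('fff')     -> True   (3 hex digits)
#   _IsColor('red')     -> False  ('r' is not a hex digit)
#   _IsColor(42)        -> False  (not a string)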
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to line charts."""
import copy
import warnings
from mapreduce.lib.graphy import common
class LineStyle(object):
"""Represents the style for a line on a line chart. Also provides some
convenient presets.
Object attributes (Passed directly to the Google Chart API. Check there for
details):
width: Width of the line
on: Length of a line segment (for dashed/dotted lines)
off: Length of a break (for dashed/dotted lines)
color: Color of the line. A hex string, like 'ff0000' for red. Optional,
AutoColor will fill this in for you automatically if empty.
Some common styles, such as LineStyle.dashed, are available:
solid
dashed
dotted
thick_solid
thick_dashed
thick_dotted
"""
# Widths
THIN = 1
THICK = 2
# Patterns
# ((on, off) tuples, as passed to LineChart.AddLine)
SOLID = (1, 0)
DASHED = (8, 4)
DOTTED = (2, 4)
def __init__(self, width, on, off, color=None):
"""Construct a LineStyle. See class docstring for details on args."""
self.width = width
self.on = on
self.off = off
self.color = color
LineStyle.solid = LineStyle(1, 1, 0)
LineStyle.dashed = LineStyle(1, 8, 4)
LineStyle.dotted = LineStyle(1, 2, 4)
LineStyle.thick_solid = LineStyle(2, 1, 0)
LineStyle.thick_dashed = LineStyle(2, 8, 4)
LineStyle.thick_dotted = LineStyle(2, 2, 4)
class LineChart(common.BaseChart):
"""Represents a line chart."""
def __init__(self, points=None):
super(LineChart, self).__init__()
if points is not None:
self.AddLine(points)
def AddLine(self, points, label=None, color=None,
pattern=LineStyle.SOLID, width=LineStyle.THIN, markers=None):
"""Add a new line to the chart.
This is a convenience method which constructs the DataSeries and appends it
for you. It returns the new series.
points: List of equally-spaced y-values for the line
label: Name of the line (used for the legend)
color: Hex string, like 'ff0000' for red
pattern: Tuple for (length of segment, length of gap), e.g.
LineStyle.DASHED
width: Width of the line (e.g. LineStyle.THIN)
markers: List of Marker objects to attach to this line (see DataSeries
for more info)
"""
if color is not None and isinstance(color[0], common.Marker):
warnings.warn('Your code may be broken! '
'You passed a list of Markers instead of a color. The '
'old argument order (markers before color) is deprecated.',
DeprecationWarning, stacklevel=2)
style = LineStyle(width, pattern[0], pattern[1], color=color)
series = common.DataSeries(points, label=label, style=style,
markers=markers)
self.data.append(series)
return series
def AddSeries(self, points, color=None, style=LineStyle.solid, markers=None,
label=None):
"""DEPRECATED"""
warnings.warn('LineChart.AddSeries is deprecated. Call AddLine instead. ',
DeprecationWarning, stacklevel=2)
return self.AddLine(points, color=color, width=style.width,
pattern=(style.on, style.off), markers=markers,
label=label)
class Sparkline(LineChart):
"""Represent a sparkline. These behave like LineCharts,
mostly, but come without axes.
"""
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various formatters which can help format a chart
object. To use these, add them to your chart's list of formatters. For
example:
chart.formatters.append(InlineLegend)
chart.formatters.append(LabelSeparator(right=8))
Feel free to write your own formatter. Formatters are just callables that
modify the chart in some (hopefully useful) way. For example, the AutoColor
formatter makes sure each DataSeries has a color applied to it. The formatter
should take the chart to format as its only argument.
(The formatters work on a deepcopy of the user's chart, so modifications
shouldn't leak back into the user's original chart)
"""
def AutoLegend(chart):
"""Automatically fill out the legend based on series labels. This will only
fill out the legend if is at least one series with a label.
"""
chart._show_legend = False
labels = []
for series in chart.data:
if series.label is None:
labels.append('')
else:
labels.append(series.label)
chart._show_legend = True
if chart._show_legend:
chart._legend_labels = labels
class AutoColor(object):
"""Automatically add colors to any series without colors.
Object attributes:
colors: The list of colors (hex strings) to cycle through. You can modify
this list if you don't like the default colors.
"""
def __init__(self):
# TODO: Add a few more default colors.
# TODO: Add a default styles too, so if you don't specify color or
# style, you get a unique set of colors & styles for your data.
self.colors = ['0000ff', 'ff0000', '00dd00', '000000']
def __call__(self, chart):
index = -1
for series in chart.data:
if series.style.color is None:
index += 1
if index >= len(self.colors):
index = 0
series.style.color = self.colors[index]
class AutoScale(object):
"""If you don't set min/max on the dependent axes, this fills them in
automatically by calculating min/max dynamically from the data.
You can set just min or just max and this formatter will fill in the other
value for you automatically. For example, if you only set min then this will
set max automatically, but leave min untouched.
Charts can have multiple dependent axes (chart.left & chart.right, for
example.) If you set min/max on some axes but not others, then this formatter
copies your min/max to the un-set axes. For example, if you set up min/max on
only the right axis then your values will be automatically copied to the left
axis. (If you use different min/max values for different axes, the
precedence is undefined, so don't do that.)
"""
def __init__(self, buffer=0.05):
"""Create a new AutoScale formatter.
Args:
buffer: percentage of extra space to allocate around the chart's axes.
"""
self.buffer = buffer
def __call__(self, chart):
"""Format the chart by setting the min/max values on its dependent axis."""
if not chart.data:
return # Nothing to do.
min_value, max_value = chart.GetMinMaxValues()
if None in (min_value, max_value):
return # No data. Nothing to do.
# Honor user's choice, if they've picked min/max.
for axis in chart.GetDependentAxes():
if axis.min is not None:
min_value = axis.min
if axis.max is not None:
max_value = axis.max
buffer = (max_value - min_value) * self.buffer # Stay away from edge.
for axis in chart.GetDependentAxes():
if axis.min is None:
axis.min = min_value - buffer
if axis.max is None:
axis.max = max_value + buffer
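# Worked example of the buffering above (illustrative numbers): for data
# spanning 0..10 with the default buffer of 0.05, any unset dependent axis
# gets min = 0 - (10 - 0) * 0.05 = -0.5 and max = 10 + 0.5 = 10.5.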
class LabelSeparator(object):
"""Adjust the label positions to avoid having them overlap. This happens for
any axis with minimum_label_spacing set.
"""
def __init__(self, left=None, right=None, bottom=None):
self.left = left
self.right = right
self.bottom = bottom
def __call__(self, chart):
self.AdjustLabels(chart.left, self.left)
self.AdjustLabels(chart.right, self.right)
self.AdjustLabels(chart.bottom, self.bottom)
def AdjustLabels(self, axis, minimum_label_spacing):
if minimum_label_spacing is None:
return
if len(axis.labels) <= 1: # Nothing to adjust
return
if axis.max is not None and axis.min is not None:
# Find the spacing required to fit all labels evenly.
# Don't try to push them farther apart than that.
maximum_possible_spacing = (axis.max - axis.min) / (len(axis.labels) - 1)
if minimum_label_spacing > maximum_possible_spacing:
minimum_label_spacing = maximum_possible_spacing
labels = [list(x) for x in zip(axis.label_positions, axis.labels)]
labels = sorted(labels, reverse=True)
# First pass from the top, moving colliding labels downward
for i in range(1, len(labels)):
if labels[i - 1][0] - labels[i][0] < minimum_label_spacing:
new_position = labels[i - 1][0] - minimum_label_spacing
if axis.min is not None and new_position < axis.min:
new_position = axis.min
labels[i][0] = new_position
# Second pass from the bottom, moving colliding labels upward
for i in range(len(labels) - 2, -1, -1):
if labels[i][0] - labels[i + 1][0] < minimum_label_spacing:
new_position = labels[i + 1][0] + minimum_label_spacing
if axis.max is not None and new_position > axis.max:
new_position = axis.max
labels[i][0] = new_position
# Separate positions and labels
label_positions, labels = zip(*labels)
axis.labels = labels
axis.label_positions = label_positions
def InlineLegend(chart):
"""Provide a legend for line charts by attaching labels to the right
end of each line. Suppresses the regular legend.
"""
show = False
labels = []
label_positions = []
for series in chart.data:
if series.label is None:
labels.append('')
else:
labels.append(series.label)
show = True
label_positions.append(series.data[-1])
if show:
chart.right.min = chart.left.min
chart.right.max = chart.left.max
chart.right.labels = labels
chart.right.label_positions = label_positions
chart._show_legend = False # Suppress the regular legend.
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code related to bar charts."""
import copy
import warnings
from mapreduce.lib.graphy import common
from mapreduce.lib.graphy import util
class BarsStyle(object):
"""Style of a series of bars in a BarChart
Object Attributes:
color: Hex string, like '00ff00' for green
"""
def __init__(self, color):
self.color = color
class BarChartStyle(object):
"""Represents the style for bars on a BarChart.
Any of the object attributes may be set to None, in which case the
value will be auto-calculated.
Object Attributes:
bar_thickness: The thickness of a bar, in pixels.
bar_gap: The gap between bars, in pixels, or as a fraction of bar thickness
if use_fractional_gap_spacing is True.
group_gap: The gap between groups of bars, in pixels, or as a fraction of
bar thickness if use_fractional_gap_spacing is True.
use_fractional_gap_spacing: if True, bar_gap and group_gap specify gap
sizes as a fraction of bar width. Default is False.
"""
_DEFAULT_GROUP_GAP = 8
_DEFAULT_BAR_GAP = 4
def __init__(self, bar_thickness=None,
bar_gap=_DEFAULT_BAR_GAP, group_gap=_DEFAULT_GROUP_GAP,
use_fractional_gap_spacing=False):
"""Create a new BarChartStyle.
Args:
bar_thickness: The thickness of a bar, in pixels. Set this to None if
you want the bar thickness to be auto-calculated (this is the default
behaviour).
bar_gap: The gap between bars, in pixels. Default is 4.
group_gap: The gap between groups of bars, in pixels. Default is 8.
use_fractional_gap_spacing: If True, bar_gap and group_gap are taken as
fractions of bar thickness instead of pixels. Default is False.
"""
self.bar_thickness = bar_thickness
self.bar_gap = bar_gap
self.group_gap = group_gap
self.use_fractional_gap_spacing = use_fractional_gap_spacing
class BarStyle(BarChartStyle):
def __init__(self, *args, **kwargs):
warnings.warn('BarStyle is deprecated. Use BarChartStyle.',
DeprecationWarning, stacklevel=2)
super(BarStyle, self).__init__(*args, **kwargs)
class BarChart(common.BaseChart):
"""Represents a bar chart.
Object attributes:
vertical: if True, the bars will be vertical. Default is True.
stacked: if True, the bars will be stacked. Default is False.
style: The BarChartStyle for all bars on this chart, specifying bar
thickness and gaps between bars.
"""
def __init__(self, points=None):
"""Constructor for BarChart objects."""
super(BarChart, self).__init__()
if points is not None:
self.AddBars(points)
self.vertical = True
self.stacked = False
self.style = BarChartStyle(None, None, None) # full auto
def AddBars(self, points, label=None, color=None):
"""Add a series of bars to the chart.
points: List of y-values for the bars in this series
label: Name of the series (used in the legend)
color: Hex string, like '00ff00' for green
This is a convenience method which constructs & appends the DataSeries for
you.
"""
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! '
'Label is a hex triplet. Maybe it is a color? The '
'old argument order (color before label) is deprecated.',
DeprecationWarning, stacklevel=2)
style = BarsStyle(color)
series = common.DataSeries(points, label=label, style=style)
self.data.append(series)
return series
def GetDependentAxes(self):
"""Get the dependendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
else:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
def GetIndependentAxes(self):
"""Get the independendant axes, which depend on orientation."""
if self.vertical:
return (self._axes[common.AxisPosition.TOP] +
self._axes[common.AxisPosition.BOTTOM])
else:
return (self._axes[common.AxisPosition.LEFT] +
self._axes[common.AxisPosition.RIGHT])
def GetDependentAxis(self):
"""Get the main dependendant axis, which depends on orientation."""
if self.vertical:
return self.left
else:
return self.bottom
def GetIndependentAxis(self):
"""Get the main independendant axis, which depends on orientation."""
if self.vertical:
return self.bottom
else:
return self.left
def GetMinMaxValues(self):
"""Get the largest & smallest bar values as (min_value, max_value)."""
if not self.stacked:
return super(BarChart, self).GetMinMaxValues()
if not self.data:
return None, None # No data, nothing to do.
num_bars = max(len(series.data) for series in self.data)
positives = [0 for i in xrange(0, num_bars)]
negatives = list(positives)
for series in self.data:
for i, point in enumerate(series.data):
if point:
if point > 0:
positives[i] += point
else:
negatives[i] += point
min_value = min(min(positives), min(negatives))
max_value = max(max(positives), max(negatives))
return min_value, max_value
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for pie charts."""
import warnings
from mapreduce.lib.graphy import common
from mapreduce.lib.graphy import util
class Segment(common.DataSeries):
"""A single segment of the pie chart.
Object attributes:
size: relative size of the segment
label: label of the segment (if any)
color: color of the segment (if any)
"""
def __init__(self, size, label=None, color=None):
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! '
'Label looks like a hex triplet; it might be a color. '
'The old argument order (color before label) is '
'deprecated.',
DeprecationWarning, stacklevel=2)
style = common._BasicStyle(color)
super(Segment, self).__init__([size], label=label, style=style)
assert size >= 0
def _GetSize(self):
return self.data[0]
def _SetSize(self, value):
assert value >= 0
self.data[0] = value
size = property(_GetSize, _SetSize,
doc = """The relative size of this pie segment.""")
# Since Segments are so simple, provide color for convenience.
def _GetColor(self):
return self.style.color
def _SetColor(self, color):
self.style.color = color
color = property(_GetColor, _SetColor,
doc = """The color of this pie segment.""")
class PieChart(common.BaseChart):
"""Represents a pie chart.
The pie chart consists of a single "pie" by default, but additional pies
may be added using the AddPie method. The Google Chart API will display
the pies as concentric circles, with pie #0 on the inside; other backends
may display the pies differently.
"""
def __init__(self, points=None, labels=None, colors=None):
"""Constructor for PieChart objects.
Creates a pie chart with a single pie.
Args:
points: A list of data points for the pie chart;
i.e., relative sizes of the pie segments
labels: A list of labels for the pie segments.
TODO: Allow the user to pass in None as one of
the labels in order to skip that label.
colors: A list of colors for the pie segments, as hex strings
(f.ex. '0000ff' for blue). If there are fewer colors than pie
segments, the Google Chart API will attempt to produce a smooth
color transition between segments by spreading the colors across
them.
"""
super(PieChart, self).__init__()
self.formatters = []
self._colors = None
if points:
self.AddPie(points, labels, colors)
def AddPie(self, points, labels=None, colors=None):
"""Add a whole pie to the chart.
Args:
points: A list of pie segment sizes
labels: A list of labels for the pie segments
colors: A list of colors for the segments. Missing colors will be chosen
automatically.
Returns:
The index of the newly added pie.
"""
num_colors = len(colors or [])
num_labels = len(labels or [])
pie_index = len(self.data)
self.data.append([])
for i, pt in enumerate(points):
label = None
if i < num_labels:
label = labels[i]
color = None
if i < num_colors:
color = colors[i]
self.AddSegment(pt, label=label, color=color, pie_index=pie_index)
return pie_index
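# Sketch of typical use (segment sizes, labels, and the color below are
# illustrative):
#   chart = PieChart()
#   inner = chart.AddPie([1, 2, 3], labels=['a', 'b', 'c'])  # pie #0
#   outer = chart.AddPie([6], colors=['c0ffee'])  # concentric pie #1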
def AddSegments(self, points, labels, colors):
"""DEPRECATED."""
warnings.warn('PieChart.AddSegments is deprecated. Call AddPie instead. ',
DeprecationWarning, stacklevel=2)
num_colors = len(colors or [])
for i, pt in enumerate(points):
assert pt >= 0
label = labels[i]
color = None
if i < num_colors:
color = colors[i]
self.AddSegment(pt, label=label, color=color)
def AddSegment(self, size, label=None, color=None, pie_index=0):
"""Add a pie segment to this chart, and return the segment.
size: The size of the segment.
label: The label for the segment.
color: The color of the segment, or None to automatically choose the color.
pie_index: The index of the pie that will receive the new segment.
By default, the chart has one pie (pie #0); use the AddPie method to
add more pies.
"""
if isinstance(size, Segment):
warnings.warn("AddSegment(segment) is deprecated. Use AddSegment(size, "
"label, color) instead", DeprecationWarning, stacklevel=2)
segment = size
else:
segment = Segment(size, label=label, color=color)
assert segment.size >= 0
if pie_index == 0 and not self.data:
# Create the default pie
self.data.append([])
assert (pie_index >= 0 and pie_index < len(self.data))
self.data[pie_index].append(segment)
return segment
def AddSeries(self, points, color=None, style=None, markers=None, label=None):
"""DEPRECATED
Add a new segment to the chart and return it.
The segment must contain exactly one data point; all parameters
other than color and label are ignored.
"""
warnings.warn('PieChart.AddSeries is deprecated. Call AddSegment or '
'AddSegments instead.', DeprecationWarning)
return self.AddSegment(Segment(points[0], color=color, label=label))
def SetColors(self, *colors):
"""Change the colors of this chart to the specified list of colors.
Note that this will completely override the individual colors specified
in the pie segments. Missing colors will be interpolated, so that the
list of colors covers all segments in all the pies.
"""
self._colors = colors
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend which can generate charts using the Google Chart API."""
from mapreduce.lib.graphy import line_chart
from mapreduce.lib.graphy import bar_chart
from mapreduce.lib.graphy import pie_chart
from mapreduce.lib.graphy.backends.google_chart_api import encoders
def _GetChartFactory(chart_class, display_class):
"""Create a factory method for instantiating charts with displays.
Returns a method which, when called, will create & return a chart with
chart.display already populated.
"""
def Inner(*args, **kwargs):
chart = chart_class(*args, **kwargs)
chart.display = display_class(chart)
return chart
return Inner
# These helper methods make it easy to get chart objects with display
# objects already setup. For example, this:
# chart = google_chart_api.LineChart()
# is equivalent to:
# chart = line_chart.LineChart()
# chart.display = google_chart_api.LineChartEncoder()
#
# (If there's some chart type for which a helper method isn't available, you
# can always just instantiate the correct encoder manually, like in the 2nd
# example above).
# TODO: fix these so they have nice docs in ipython (give them __doc__)
LineChart = _GetChartFactory(line_chart.LineChart, encoders.LineChartEncoder)
Sparkline = _GetChartFactory(line_chart.Sparkline, encoders.SparklineEncoder)
BarChart = _GetChartFactory(bar_chart.BarChart, encoders.BarChartEncoder)
PieChart = _GetChartFactory(pie_chart.PieChart, encoders.PieChartEncoder)
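# End-to-end sketch (the 300x200 size is an arbitrary example):
#   from mapreduce.lib.graphy.backends import google_chart_api
#   chart = google_chart_api.LineChart([1, 2, 3])
#   url = chart.display.Url(300, 200)  # the factory pre-populated .display
#   img = chart.display.Img(300, 200)  # or an <img> tag instead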
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display objects for the different kinds of charts.
Not intended for end users, use the methods in __init__ instead."""
import warnings
from mapreduce.lib.graphy.backends.google_chart_api import util
class BaseChartEncoder(object):
"""Base class for encoders which turn chart objects into Google Chart URLS.
Object attributes:
extra_params: Dict to add/override specific chart params. Of the
form param:string, passed directly to the Google Chart API.
For example, 'cht':'lti' becomes ?cht=lti in the URL.
url_base: The prefix to use for URLs. If you want to point to a different
server for some reason, you would override this.
formatters: TODO: Need to explain how these work, and how they are
different from chart formatters.
enhanced_encoding: If True, uses enhanced encoding. If
False, simple encoding is used.
escape_url: If True, URL will be properly escaped. If False, characters
like | and , will be unescaped (which makes the URL easier to
read).
"""
def __init__(self, chart):
self.extra_params = {} # You can add specific params here.
self.url_base = 'http://chart.apis.google.com/chart'
self.formatters = self._GetFormatters()
self.chart = chart
self.enhanced_encoding = False
self.escape_url = True # You can turn off URL escaping for debugging.
self._width = 0 # These are set when someone calls Url()
self._height = 0
def Url(self, width, height, use_html_entities=False):
"""Get the URL for our graph.
Args:
width: Width of the chart, in pixels.
height: Height of the chart, in pixels.
use_html_entities: If True, reserved HTML characters (&, <, >, ") in the
URL are replaced with HTML entities (&amp;, &lt;, etc.). Default is False.
"""
self._width = width
self._height = height
params = self._Params(self.chart)
return util.EncodeUrl(self.url_base, params, self.escape_url,
use_html_entities)
def Img(self, width, height):
"""Get an image tag for our graph."""
url = self.Url(width, height, use_html_entities=True)
tag = '<img src="%s" width="%s" height="%s" alt="chart"/>'
return tag % (url, width, height)
def _GetType(self, chart):
"""Return the correct chart_type param for the chart."""
raise NotImplementedError
def _GetFormatters(self):
"""Get a list of formatter functions to use for encoding."""
formatters = [self._GetLegendParams,
self._GetDataSeriesParams,
self._GetColors,
self._GetAxisParams,
self._GetGridParams,
self._GetType,
self._GetExtraParams,
self._GetSizeParams,
]
return formatters
def _Params(self, chart):
"""Collect all the different params we need for the URL. Collecting
all params as a dict before converting to a URL makes testing easier.
"""
chart = chart.GetFormattedChart()
params = {}
def Add(new_params):
params.update(util.ShortenParameterNames(new_params))
for formatter in self.formatters:
Add(formatter(chart))
for key in params:
params[key] = str(params[key])
return params
def _GetSizeParams(self, chart):
"""Get the size param."""
return {'size': '%sx%s' % (int(self._width), int(self._height))}
def _GetExtraParams(self, chart):
"""Get any extra params (from extra_params)."""
return self.extra_params
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
y_min, y_max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
series_data = []
markers = []
for i, series in enumerate(chart.data):
data = series.data
if not data: # Drop empty series.
continue
series_data.append(data)
for x, marker in series.markers:
args = [marker.shape, marker.color, i, x, marker.size]
markers.append(','.join(str(arg) for arg in args))
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, series_data, y_min, y_max, encoder)
result.update(util.JoinLists(marker = markers))
return result
def _GetColors(self, chart):
"""Color series color parameter."""
colors = []
for series in chart.data:
if not series.data:
continue
colors.append(series.style.color)
return util.JoinLists(color = colors)
def _GetDataEncoder(self, chart):
"""Get a class which can encode the data the way the user requested."""
if not self.enhanced_encoding:
return util.SimpleDataEncoder()
return util.EnhancedDataEncoder()
def _GetLegendParams(self, chart):
"""Get params for showing a legend."""
if chart._show_legend:
return util.JoinLists(data_series_label = chart._legend_labels)
return {}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Return axis.labels & axis.label_positions."""
return axis.labels, axis.label_positions
def _GetAxisParams(self, chart):
"""Collect params related to our various axes (x, y, right-hand)."""
axis_types = []
axis_ranges = []
axis_labels = []
axis_label_positions = []
axis_label_gridlines = []
mark_length = max(self._width, self._height)
for i, axis_pair in enumerate(a for a in chart._GetAxes() if a[1].labels):
axis_type_code, axis = axis_pair
axis_types.append(axis_type_code)
if axis.min is not None or axis.max is not None:
assert axis.min is not None # Sanity check: both min & max must be set.
assert axis.max is not None
axis_ranges.append('%s,%s,%s' % (i, axis.min, axis.max))
labels, positions = self._GetAxisLabelsAndPositions(axis, chart)
if labels:
axis_labels.append('%s:' % i)
axis_labels.extend(labels)
if positions:
positions = [i] + list(positions)
axis_label_positions.append(','.join(str(x) for x in positions))
if axis.label_gridlines:
axis_label_gridlines.append("%d,%d" % (i, -mark_length))
return util.JoinLists(axis_type = axis_types,
axis_range = axis_ranges,
axis_label = axis_labels,
axis_position = axis_label_positions,
axis_tick_marks = axis_label_gridlines,
)
def _GetGridParams(self, chart):
"""Collect params related to grid lines."""
x = 0
y = 0
if chart.bottom.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.bottom.min is not None)
assert(chart.bottom.max is not None)
total = float(chart.bottom.max - chart.bottom.min)
x = 100 * chart.bottom.grid_spacing / total
if chart.left.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.left.min is not None)
assert(chart.left.max is not None)
total = float(chart.left.max - chart.left.min)
y = 100 * chart.left.grid_spacing / total
if x or y:
return dict(grid = '%.3g,%.3g,1,0' % (x, y))
return {}
class LineChartEncoder(BaseChartEncoder):
"""Helper class to encode LineChart objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lc'}
def _GetLineStyles(self, chart):
"""Get LineStyle parameters."""
styles = []
for series in chart.data:
style = series.style
if style:
styles.append('%s,%s,%s' % (style.width, style.on, style.off))
else:
# If one style is missing, they must all be missing
# TODO: Add a test for this; throw a more meaningful exception
assert (not styles)
return util.JoinLists(line_style = styles)
def _GetFormatters(self):
out = super(LineChartEncoder, self)._GetFormatters()
out.insert(-2, self._GetLineStyles)
return out
class SparklineEncoder(LineChartEncoder):
"""Helper class to encode Sparkline objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lfi'}
class BarChartEncoder(BaseChartEncoder):
"""Helper class to encode BarChart objects into Google Chart URLs."""
__STYLE_DEPRECATION = ('BarChart.display.style is deprecated.' +
' Use BarChart.style, instead.')
def __init__(self, chart, style=None):
"""Construct a new BarChartEncoder.
Args:
style: DEPRECATED. Set style on the chart object itself.
"""
super(BarChartEncoder, self).__init__(chart)
if style is not None:
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
chart.style = style
def _GetType(self, chart):
# Vertical Stacked Type
types = {(True, False): 'bvg',
(True, True): 'bvs',
(False, False): 'bhg',
(False, True): 'bhs'}
return {'chart_type': types[(chart.vertical, chart.stacked)]}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Reverse labels on the y-axis in horizontal bar charts.
(Otherwise the labels come out backwards from what you would expect)
"""
if not chart.vertical and axis == chart.left:
# The left axis of horizontal bar charts needs to have reversed labels
return reversed(axis.labels), reversed(axis.label_positions)
return axis.labels, axis.label_positions
def _GetFormatters(self):
out = super(BarChartEncoder, self)._GetFormatters()
# insert at -2 to allow extra_params to overwrite everything
out.insert(-2, self._ZeroPoint)
out.insert(-2, self._ApplyBarChartStyle)
return out
def _ZeroPoint(self, chart):
"""Get the zero-point if any bars are negative."""
# (Maybe) set the zero point.
min, max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
out = {}
if min < 0:
if max < 0:
out['chp'] = 1
else:
out['chp'] = -min/float(max - min)
return out
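# Worked example of the zero-point math above (illustrative axis range):
# with min = -20 and max = 80, chp = -(-20) / float(80 - (-20)) = 0.2, i.e.
# the bars' zero line sits 20% of the way along the dependent axis.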
def _ApplyBarChartStyle(self, chart):
"""If bar style is specified, fill in the missing data and apply it."""
# sanity checks
if chart.style is None or not chart.data:
return {}
(bar_thickness, bar_gap, group_gap) = (chart.style.bar_thickness,
chart.style.bar_gap,
chart.style.group_gap)
# Auto-size bar/group gaps
if bar_gap is None and group_gap is not None:
bar_gap = max(0, group_gap / 2)
if not chart.style.use_fractional_gap_spacing:
bar_gap = int(bar_gap)
if group_gap is None and bar_gap is not None:
group_gap = max(0, bar_gap * 2)
# Set bar thickness to auto if it is missing
if bar_thickness is None:
if chart.style.use_fractional_gap_spacing:
bar_thickness = 'r'
else:
bar_thickness = 'a'
else:
# Convert gap sizes to pixels if needed
if chart.style.use_fractional_gap_spacing:
if bar_gap:
bar_gap = int(bar_thickness * bar_gap)
if group_gap:
group_gap = int(bar_thickness * group_gap)
# Build a valid spec; ignore group gap if chart is stacked,
# since there are no groups in that case
spec = [bar_thickness]
if bar_gap is not None:
spec.append(bar_gap)
if group_gap is not None and not chart.stacked:
spec.append(group_gap)
return util.JoinLists(bar_size = spec)
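# Worked example of the fractional-gap conversion above (illustrative style):
# BarChartStyle(bar_thickness=10, bar_gap=0.5, group_gap=1.5,
# use_fractional_gap_spacing=True) on a grouped (non-stacked) chart yields
# bar_gap = int(10 * 0.5) = 5 and group_gap = int(10 * 1.5) = 15, so the
# spec becomes [10, 5, 15] (joined into the URL as chbh=10,5,15).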
def __GetStyle(self):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
return self.chart.style
def __SetStyle(self, value):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
self.chart.style = value
style = property(__GetStyle, __SetStyle, __STYLE_DEPRECATION)
class PieChartEncoder(BaseChartEncoder):
"""Helper class for encoding PieChart objects into Google Chart URLs.
Object Attributes:
is3d: if True, draw a 3d pie chart. Default is False.
"""
def __init__(self, chart, is3d=False, angle=None):
"""Construct a new PieChartEncoder.
Args:
is3d: If True, draw a 3d pie chart. Default is False. If the pie chart
includes multiple pies, is3d must be set to False.
angle: Angle of rotation of the pie chart, in radians.
"""
super(PieChartEncoder, self).__init__(chart)
self.is3d = is3d
self.angle = angle
def _GetFormatters(self):
"""Add a formatter for the chart angle."""
formatters = super(PieChartEncoder, self)._GetFormatters()
formatters.append(self._GetAngleParams)
return formatters
def _GetType(self, chart):
if len(chart.data) > 1:
if self.is3d:
warnings.warn(
'3d charts with more than one pie not supported; rendering in 2d',
RuntimeWarning, stacklevel=2)
chart_type = 'pc'
else:
if self.is3d:
chart_type = 'p3'
else:
chart_type = 'p'
return {'chart_type': chart_type}
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
pie_points = []
labels = []
max_val = 1
for pie in chart.data:
points = []
for segment in pie:
if segment:
points.append(segment.size)
max_val = max(max_val, segment.size)
labels.append(segment.label or '')
if points:
pie_points.append(points)
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, pie_points, 0, max_val, encoder)
result.update(util.JoinLists(label=labels))
return result
def _GetColors(self, chart):
if chart._colors:
# Colors were overridden by the user
colors = chart._colors
else:
# Build the list of colors from individual segments
colors = []
for pie in chart.data:
for segment in pie:
if segment and segment.color:
colors.append(segment.color)
return util.JoinLists(color = colors)
def _GetAngleParams(self, chart):
"""If the user specified an angle, add it to the params."""
if self.angle:
return {'chp' : str(self.angle)}
return {}
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for working with the Google Chart API.
Not intended for end users, use the methods in __init__ instead."""
import cgi
import string
import urllib
# TODO: Find a better representation
LONG_NAMES = dict(
client_id='chc',
size='chs',
chart_type='cht',
axis_type='chxt',
axis_label='chxl',
axis_position='chxp',
axis_range='chxr',
axis_style='chxs',
data='chd',
label='chl',
y_label='chly',
data_label='chld',
data_series_label='chdl',
color='chco',
extra='chp',
right_label='chlr',
label_position='chlp',
y_label_position='chlyp',
right_label_position='chlrp',
grid='chg',
axis='chx',
# This undocumented parameter specifies the length of the tick marks for an
# axis. Negative values will extend tick marks into the main graph area.
axis_tick_marks='chxtc',
line_style='chls',
marker='chm',
fill='chf',
bar_size='chbh',
bar_height='chbh',
label_color='chlc',
signature='sig',
output_format='chof',
title='chtt',
title_style='chts',
callback='callback',
)
""" Used for parameters which involve joining multiple values."""
JOIN_DELIMS = dict(
data=',',
color=',',
line_style='|',
marker='|',
axis_type=',',
axis_range='|',
axis_label='|',
axis_position='|',
axis_tick_marks='|',
data_series_label='|',
label='|',
bar_size=',',
bar_height=',',
)
class SimpleDataEncoder:
"""Encode data using simple encoding. Out-of-range data will
be dropped (encoded as '_').
"""
def __init__(self):
self.prefix = 's:'
self.code = string.ascii_uppercase + string.ascii_lowercase + string.digits
self.min = 0
self.max = len(self.code) - 1
def Encode(self, data):
return ''.join(self._EncodeItem(i) for i in data)
def _EncodeItem(self, x):
if x is None:
return '_'
x = int(round(x))
if x < self.min or x > self.max:
return '_'
return self.code[int(x)]
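# Example of the simple encoding (62-character alphabet; values must already
# be scaled into 0..61, which EncodeData below takes care of):
#   SimpleDataEncoder().Encode([0, 25, 61, None])  -> 'AZ9_'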
class EnhancedDataEncoder:
"""Encode data using enhanced encoding. Out-of-range data will
be dropped (encoded as '_').
"""
def __init__(self):
self.prefix = 'e:'
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits \
+ '-.'
self.code = [x + y for x in chars for y in chars]
self.min = 0
self.max = len(self.code) - 1
def Encode(self, data):
return ''.join(self._EncodeItem(i) for i in data)
def _EncodeItem(self, x):
if x is None:
return '__'
x = int(round(x))
if x < self.min or x > self.max:
return '__'
return self.code[int(x)]
def EncodeUrl(base, params, escape_url, use_html_entities):
"""Escape params, combine and append them to base to generate a full URL."""
real_params = []
for key, value in params.iteritems():
if escape_url:
value = urllib.quote(value)
if value:
real_params.append('%s=%s' % (key, value))
if real_params:
url = '%s?%s' % (base, '&'.join(real_params))
else:
url = base
if use_html_entities:
url = cgi.escape(url, quote=True)
return url
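# Added sketch: EncodeUrl URL-quotes each value and joins key=value pairs
# with '&'. The parameter values below are hypothetical.
def _demo_encode_url():
  params = {'cht': 'lc', 'chs': '300x200', 'chd': 's:Ae9'}
  print EncodeUrl('http://chart.apis.google.com/chart', params,
                  escape_url=True, use_html_entities=False)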
def ShortenParameterNames(params):
"""Shorten long parameter names (like size) to short names (like chs)."""
out = {}
for name, value in params.iteritems():
short_name = LONG_NAMES.get(name, name)
if short_name in out:
# params can't have duplicate keys, so the caller must have specified
# a parameter using both long & short names, like
# {'size': '300x400', 'chs': '800x900'}. We don't know which to use.
raise KeyError('Both long and short version of parameter %s (%s) '
'found. It is unclear which one to use.' % (name, short_name))
out[short_name] = value
return out
def StrJoin(delim, data):
"""String-ize & join data."""
return delim.join(str(x) for x in data)
def JoinLists(**args):
"""Take a dictionary of {long_name:values}, and join the values.
For each long_name, join the values into a string according to
JOIN_DELIMS. If values is empty or None, replace with an empty string.
Returns:
A dictionary {long_name:joined_value} entries.
"""
out = {}
for key, val in args.items():
if val:
out[key] = StrJoin(JOIN_DELIMS[key], val)
else:
out[key] = ''
return out
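# Added sketch: JoinLists looks up the delimiter for each long parameter
# name in JOIN_DELIMS. The color/marker values below are made up.
def _demo_join_lists():
  joined = JoinLists(color=['ff0000', '00ff00'], marker=['o,ff0000,0,1,5'])
  print joined  # color -> 'ff0000,00ff00', marker -> 'o,ff0000,0,1,5'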
def EncodeData(chart, series, y_min, y_max, encoder):
"""Format the given data series in plain or extended format.
Use the chart's encoder to determine the format. The formatted data will
be scaled to fit within the range of values supported by the chosen
encoding.
Args:
chart: The chart.
series: A list of the data series to format; each list element is
a list of data points.
y_min: Minimum data value. May be None if y_max is also None.
y_max: Maximum data value. May be None if y_min is also None.
encoder: The data encoder (e.g. SimpleDataEncoder) used to format the series.
Returns:
A dictionary with one key, 'data', whose value is the fully encoded series.
"""
assert (y_min is None) == (y_max is None)
if y_min is not None:
def _ScaleAndEncode(series):
series = ScaleData(series, y_min, y_max, encoder.min, encoder.max)
return encoder.Encode(series)
encoded_series = [_ScaleAndEncode(s) for s in series]
else:
encoded_series = [encoder.Encode(s) for s in series]
result = JoinLists(**{'data': encoded_series})
result['data'] = encoder.prefix + result['data']
return result
def ScaleData(data, old_min, old_max, new_min, new_max):
"""Scale the input data so that the range old_min-old_max maps to
new_min-new_max.
"""
def ScalePoint(x):
if x is None:
return None
return scale * x + translate
if old_min == old_max:
scale = 1
else:
scale = (new_max - new_min) / float(old_max - old_min)
translate = new_min - scale * old_min
return map(ScalePoint, data)
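# Added sketch: scaling raw data into an encoder's range and then encoding
# it mirrors what EncodeData does internally. Values are illustrative.
if __name__ == '__main__':
  _encoder = SimpleDataEncoder()
  _scaled = ScaleData([0.0, 5.0, 10.0], 0, 10, _encoder.min, _encoder.max)
  print _encoder.prefix + _encoder.Encode(_scaled)  # 's:Af9'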
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display objects for the different kinds of charts.
Not intended for end users; use the methods in __init__ instead."""
import warnings
from mapreduce.lib.graphy.backends.google_chart_api import util
class BaseChartEncoder(object):
"""Base class for encoders which turn chart objects into Google Chart URLS.
Object attributes:
extra_params: Dict to add/override specific chart params. Of the
form param:string, passed directly to the Google Chart API.
For example, 'cht':'lti' becomes ?cht=lti in the URL.
url_base: The prefix to use for URLs. If you want to point to a different
server for some reason, you would override this.
formatters: TODO: Need to explain how these work, and how they are
different from chart formatters.
enhanced_encoding: If True, uses enhanced encoding. If
False, simple encoding is used.
escape_url: If True, URL will be properly escaped. If False, characters
like | and , will be unescaped (which makes the URL easier to
read).
"""
def __init__(self, chart):
self.extra_params = {} # You can add specific params here.
self.url_base = 'http://chart.apis.google.com/chart'
self.formatters = self._GetFormatters()
self.chart = chart
self.enhanced_encoding = False
self.escape_url = True # You can turn off URL escaping for debugging.
self._width = 0 # These are set when someone calls Url()
self._height = 0
def Url(self, width, height, use_html_entities=False):
"""Get the URL for our graph.
Args:
use_html_entities: If True, reserved HTML characters (&, <, >, ") in the
URL are replaced with HTML entities (&amp;, &lt;, etc.). Default is False.
"""
self._width = width
self._height = height
params = self._Params(self.chart)
return util.EncodeUrl(self.url_base, params, self.escape_url,
use_html_entities)
def Img(self, width, height):
"""Get an image tag for our graph."""
url = self.Url(width, height, use_html_entities=True)
tag = '<img src="%s" width="%s" height="%s" alt="chart"/>'
return tag % (url, width, height)
def _GetType(self, chart):
"""Return the correct chart_type param for the chart."""
raise NotImplementedError
def _GetFormatters(self):
"""Get a list of formatter functions to use for encoding."""
formatters = [self._GetLegendParams,
self._GetDataSeriesParams,
self._GetColors,
self._GetAxisParams,
self._GetGridParams,
self._GetType,
self._GetExtraParams,
self._GetSizeParams,
]
return formatters
def _Params(self, chart):
"""Collect all the different params we need for the URL. Collecting
all params as a dict before converting to a URL makes testing easier.
"""
chart = chart.GetFormattedChart()
params = {}
def Add(new_params):
params.update(util.ShortenParameterNames(new_params))
for formatter in self.formatters:
Add(formatter(chart))
for key in params:
params[key] = str(params[key])
return params
def _GetSizeParams(self, chart):
"""Get the size param."""
return {'size': '%sx%s' % (int(self._width), int(self._height))}
def _GetExtraParams(self, chart):
"""Get any extra params (from extra_params)."""
return self.extra_params
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
y_min, y_max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
series_data = []
markers = []
for i, series in enumerate(chart.data):
data = series.data
if not data: # Drop empty series.
continue
series_data.append(data)
for x, marker in series.markers:
args = [marker.shape, marker.color, i, x, marker.size]
markers.append(','.join(str(arg) for arg in args))
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, series_data, y_min, y_max, encoder)
result.update(util.JoinLists(marker = markers))
return result
def _GetColors(self, chart):
"""Color series color parameter."""
colors = []
for series in chart.data:
if not series.data:
continue
colors.append(series.style.color)
return util.JoinLists(color = colors)
def _GetDataEncoder(self, chart):
"""Get a class which can encode the data the way the user requested."""
if not self.enhanced_encoding:
return util.SimpleDataEncoder()
return util.EnhancedDataEncoder()
def _GetLegendParams(self, chart):
"""Get params for showing a legend."""
if chart._show_legend:
return util.JoinLists(data_series_label = chart._legend_labels)
return {}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Return axis.labels & axis.label_positions."""
return axis.labels, axis.label_positions
def _GetAxisParams(self, chart):
"""Collect params related to our various axes (x, y, right-hand)."""
axis_types = []
axis_ranges = []
axis_labels = []
axis_label_positions = []
axis_label_gridlines = []
mark_length = max(self._width, self._height)
for i, axis_pair in enumerate(a for a in chart._GetAxes() if a[1].labels):
axis_type_code, axis = axis_pair
axis_types.append(axis_type_code)
if axis.min is not None or axis.max is not None:
assert axis.min is not None # Sanity check: both min & max must be set.
assert axis.max is not None
axis_ranges.append('%s,%s,%s' % (i, axis.min, axis.max))
labels, positions = self._GetAxisLabelsAndPositions(axis, chart)
if labels:
axis_labels.append('%s:' % i)
axis_labels.extend(labels)
if positions:
positions = [i] + list(positions)
axis_label_positions.append(','.join(str(x) for x in positions))
if axis.label_gridlines:
axis_label_gridlines.append("%d,%d" % (i, -mark_length))
return util.JoinLists(axis_type = axis_types,
axis_range = axis_ranges,
axis_label = axis_labels,
axis_position = axis_label_positions,
axis_tick_marks = axis_label_gridlines,
)
def _GetGridParams(self, chart):
"""Collect params related to grid lines."""
x = 0
y = 0
if chart.bottom.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.bottom.min is not None)
assert(chart.bottom.max is not None)
total = float(chart.bottom.max - chart.bottom.min)
x = 100 * chart.bottom.grid_spacing / total
if chart.left.grid_spacing:
# min/max must be set for this to make sense.
assert(chart.left.min is not None)
assert(chart.left.max is not None)
total = float(chart.left.max - chart.left.min)
y = 100 * chart.left.grid_spacing / total
if x or y:
return dict(grid = '%.3g,%.3g,1,0' % (x, y))
return {}
class LineChartEncoder(BaseChartEncoder):
"""Helper class to encode LineChart objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lc'}
def _GetLineStyles(self, chart):
"""Get LineStyle parameters."""
styles = []
for series in chart.data:
style = series.style
if style:
styles.append('%s,%s,%s' % (style.width, style.on, style.off))
else:
# If one style is missing, they must all be missing
# TODO: Add a test for this; throw a more meaningful exception
assert (not styles)
return util.JoinLists(line_style = styles)
def _GetFormatters(self):
out = super(LineChartEncoder, self)._GetFormatters()
out.insert(-2, self._GetLineStyles)
return out
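# Added sketch: encoders are normally attached to charts via the factory
# helpers in google_chart_api's __init__ (as chart.display), but one can be
# constructed directly. The chart argument is assumed to be a graphy chart.
def _demo_line_encoder_url(chart):
  encoder = LineChartEncoder(chart)
  return encoder.Url(300, 200)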
class SparklineEncoder(LineChartEncoder):
"""Helper class to encode Sparkline objects into Google Chart URLs."""
def _GetType(self, chart):
return {'chart_type': 'lfi'}
class BarChartEncoder(BaseChartEncoder):
"""Helper class to encode BarChart objects into Google Chart URLs."""
__STYLE_DEPRECATION = ('BarChart.display.style is deprecated.' +
' Use BarChart.style, instead.')
def __init__(self, chart, style=None):
"""Construct a new BarChartEncoder.
Args:
style: DEPRECATED. Set style on the chart object itself.
"""
super(BarChartEncoder, self).__init__(chart)
if style is not None:
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
chart.style = style
def _GetType(self, chart):
# Map (vertical, stacked) to the corresponding Chart API bar type code.
types = {(True, False): 'bvg',
(True, True): 'bvs',
(False, False): 'bhg',
(False, True): 'bhs'}
return {'chart_type': types[(chart.vertical, chart.stacked)]}
def _GetAxisLabelsAndPositions(self, axis, chart):
"""Reverse labels on the y-axis in horizontal bar charts.
(Otherwise the labels come out backwards from what you would expect)
"""
if not chart.vertical and axis == chart.left:
# The left axis of horizontal bar charts needs to have reversed labels
return reversed(axis.labels), reversed(axis.label_positions)
return axis.labels, axis.label_positions
def _GetFormatters(self):
out = super(BarChartEncoder, self)._GetFormatters()
# insert at -2 to allow extra_params to overwrite everything
out.insert(-2, self._ZeroPoint)
out.insert(-2, self._ApplyBarChartStyle)
return out
def _ZeroPoint(self, chart):
"""Get the zero-point if any bars are negative."""
# (Maybe) set the zero point.
min, max = chart.GetDependentAxis().min, chart.GetDependentAxis().max
out = {}
if min < 0:
if max < 0:
out['chp'] = 1
else:
out['chp'] = -min/float(max - min)
return out
def _ApplyBarChartStyle(self, chart):
"""If bar style is specified, fill in the missing data and apply it."""
# sanity checks
if chart.style is None or not chart.data:
return {}
(bar_thickness, bar_gap, group_gap) = (chart.style.bar_thickness,
chart.style.bar_gap,
chart.style.group_gap)
# Auto-size bar/group gaps
if bar_gap is None and group_gap is not None:
bar_gap = max(0, group_gap / 2)
if not chart.style.use_fractional_gap_spacing:
bar_gap = int(bar_gap)
if group_gap is None and bar_gap is not None:
group_gap = max(0, bar_gap * 2)
# Set bar thickness to auto if it is missing
if bar_thickness is None:
if chart.style.use_fractional_gap_spacing:
bar_thickness = 'r'
else:
bar_thickness = 'a'
else:
# Convert gap sizes to pixels if needed
if chart.style.use_fractional_gap_spacing:
if bar_gap:
bar_gap = int(bar_thickness * bar_gap)
if group_gap:
group_gap = int(bar_thickness * group_gap)
# Build a valid spec; ignore group gap if chart is stacked,
# since there are no groups in that case
spec = [bar_thickness]
if bar_gap is not None:
spec.append(bar_gap)
if group_gap is not None and not chart.stacked:
spec.append(group_gap)
return util.JoinLists(bar_size = spec)
def __GetStyle(self):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
return self.chart.style
def __SetStyle(self, value):
warnings.warn(self.__STYLE_DEPRECATION, DeprecationWarning, stacklevel=2)
self.chart.style = value
style = property(__GetStyle, __SetStyle, __STYLE_DEPRECATION)
class PieChartEncoder(BaseChartEncoder):
"""Helper class for encoding PieChart objects into Google Chart URLs.
Fuzzy frogs frolic in the forest.
Object Attributes:
is3d: if True, draw a 3d pie chart. Default is False.
"""
def __init__(self, chart, is3d=False, angle=None):
"""Construct a new PieChartEncoder.
Args:
is3d: If True, draw a 3d pie chart. Default is False. If the pie chart
includes multiple pies, is3d must be set to False.
angle: Angle of rotation of the pie chart, in radians.
"""
super(PieChartEncoder, self).__init__(chart)
self.is3d = is3d
self.angle = angle
def _GetFormatters(self):
"""Add a formatter for the chart angle."""
formatters = super(PieChartEncoder, self)._GetFormatters()
formatters.append(self._GetAngleParams)
return formatters
def _GetType(self, chart):
if len(chart.data) > 1:
if self.is3d:
warnings.warn(
'3d charts with more than one pie not supported; rendering in 2d',
RuntimeWarning, stacklevel=2)
chart_type = 'pc'
else:
if self.is3d:
chart_type = 'p3'
else:
chart_type = 'p'
return {'chart_type': chart_type}
def _GetDataSeriesParams(self, chart):
"""Collect params related to the data series."""
pie_points = []
labels = []
max_val = 1
for pie in chart.data:
points = []
for segment in pie:
if segment:
points.append(segment.size)
max_val = max(max_val, segment.size)
labels.append(segment.label or '')
if points:
pie_points.append(points)
encoder = self._GetDataEncoder(chart)
result = util.EncodeData(chart, pie_points, 0, max_val, encoder)
result.update(util.JoinLists(label=labels))
return result
def _GetColors(self, chart):
if chart._colors:
# Colors were overridden by the user
colors = chart._colors
else:
# Build the list of colors from individual segments
colors = []
for pie in chart.data:
for segment in pie:
if segment and segment.color:
colors.append(segment.color)
return util.JoinLists(color = colors)
def _GetAngleParams(self, chart):
"""If the user specified an angle, add it to the params."""
if self.angle:
return {'chp' : str(self.angle)}
return {}
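# Added sketch: a rotation passed to the constructor surfaces as the 'chp'
# URL parameter. The chart argument is assumed to be a graphy pie chart.
def _demo_pie_rotation(chart):
  encoder = PieChartEncoder(chart, is3d=False, angle=0.5)
  return encoder.Url(300, 200)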
| Python |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Backend which can generate charts using the Google Chart API."""
from mapreduce.lib.graphy import line_chart
from mapreduce.lib.graphy import bar_chart
from mapreduce.lib.graphy import pie_chart
from mapreduce.lib.graphy.backends.google_chart_api import encoders
def _GetChartFactory(chart_class, display_class):
"""Create a factory method for instantiating charts with displays.
Returns a method which, when called, will create & return a chart with
chart.display already populated.
"""
def Inner(*args, **kwargs):
chart = chart_class(*args, **kwargs)
chart.display = display_class(chart)
return chart
return Inner
# These helper methods make it easy to get chart objects with display
# objects already setup. For example, this:
# chart = google_chart_api.LineChart()
# is equivalent to:
# chart = line_chart.LineChart()
# chart.display = google_chart_api.LineChartEncoder()
#
# (If there's some chart type for which a helper method isn't available, you
# can always just instantiate the correct encoder manually, like in the 2nd
# example above).
# TODO: fix these so they have nice docs in ipython (give them __doc__)
LineChart = _GetChartFactory(line_chart.LineChart, encoders.LineChartEncoder)
Sparkline = _GetChartFactory(line_chart.Sparkline, encoders.SparklineEncoder)
BarChart = _GetChartFactory(bar_chart.BarChart, encoders.BarChartEncoder)
PieChart = _GetChartFactory(pie_chart.PieChart, encoders.PieChartEncoder)
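# Added usage sketch: each factory returns a chart whose .display is already
# wired to the matching encoder, so producing a URL is a single call. This
# assumes graphy's LineChart.AddLine helper.
if __name__ == '__main__':
  chart = LineChart()
  chart.AddLine([1, 2, 3, 5, 8])
  print chart.display.Url(300, 200)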
| Python |
#!/usr/bin/env python
| Python |
#!/usr/bin/env python
| Python |
#!/usr/bin/env python
__version__='1.0'
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Blobstore API module."""
from blobstore import *
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Python blobstore API used by app developers.
Contains methods used to interface with Blobstore API. Includes db.Model-like
class representing a reference to a very large BLOB. Imports db.Key-like
class representing a blob-key.
"""
import cgi
import email
import os
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api.blobstore import blobstore
from google.appengine.ext import db
__all__ = ['BLOB_INFO_KIND',
'BLOB_KEY_HEADER',
'BLOB_RANGE_HEADER',
'BlobFetchSizeTooLargeError',
'BlobInfo',
'BlobInfoParseError',
'BlobKey',
'BlobNotFoundError',
'BlobReferenceProperty',
'BlobReader',
'DataIndexOutOfRangeError',
'Error',
'InternalError',
'MAX_BLOB_FETCH_SIZE',
'UPLOAD_INFO_CREATION_HEADER',
'create_upload_url',
'delete',
'fetch_data',
'get',
'parse_blob_info']
Error = blobstore.Error
InternalError = blobstore.InternalError
BlobFetchSizeTooLargeError = blobstore.BlobFetchSizeTooLargeError
BlobNotFoundError = blobstore.BlobNotFoundError
_CreationFormatError = blobstore._CreationFormatError
DataIndexOutOfRangeError = blobstore.DataIndexOutOfRangeError
BlobKey = blobstore.BlobKey
create_upload_url = blobstore.create_upload_url
delete = blobstore.delete
class BlobInfoParseError(Error):
"""CGI parameter does not contain valid BlobInfo record."""
BLOB_INFO_KIND = blobstore.BLOB_INFO_KIND
BLOB_KEY_HEADER = blobstore.BLOB_KEY_HEADER
BLOB_RANGE_HEADER = blobstore.BLOB_RANGE_HEADER
MAX_BLOB_FETCH_SIZE = blobstore.MAX_BLOB_FETCH_SIZE
UPLOAD_INFO_CREATION_HEADER = blobstore.UPLOAD_INFO_CREATION_HEADER
class _GqlQuery(db.GqlQuery):
"""GqlQuery class that explicitly sets model-class.
This does the same as the original db.GqlQuery class except that it does
not try to find the model class based on the compiled GQL query. The
caller instead provides the query with a model class to use for construction.
This class is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
def __init__(self, query_string, model_class, *args, **kwds):
"""Constructor.
Args:
query_string: Properly formatted GQL query string.
model_class: Model class from which entities are constructed.
*args: Positional arguments used to bind numeric references in the query.
**kwds: Dictionary-based arguments for named references.
"""
from google.appengine.ext import gql
app = kwds.pop('_app', None)
self._proto_query = gql.GQL(query_string, _app=app, namespace='')
super(db.GqlQuery, self).__init__(model_class, namespace='')
self.bind(*args, **kwds)
class BlobInfo(object):
"""Information about blobs in Blobstore.
This is a db.Model-like class that contains information about blobs stored
by an application. Like db.Model, this class is backed by a Datastore
entity; however, BlobInfo instances are read-only and have a much more
limited interface.
Each BlobInfo has a key of type BlobKey associated with it. This key is
specific to the Blobstore API and is not compatible with db.get. The key
can be used for quick lookup by passing it to BlobInfo.get. This
key converts easily to a string, which is web safe and can be embedded
in URLs.
Properties:
content_type: Content type of blob.
creation: Creation date of blob, when it was uploaded.
filename: Filename user selected from their machine.
size: Size of uncompressed blob.
All properties are read-only. Attempting to assign a value to a property
will raise NotImplementedError.
"""
_unindexed_properties = frozenset()
@property
def content_type(self):
return self.__get_value('content_type')
@property
def creation(self):
return self.__get_value('creation')
@property
def filename(self):
return self.__get_value('filename')
@property
def size(self):
return self.__get_value('size')
def __init__(self, entity_or_blob_key, _values=None):
"""Constructor for wrapping blobstore entity.
The constructor should not be used outside this package and tests.
Args:
entity_or_blob_key: Datastore entity that represents the blob reference,
or the BlobKey of an existing blob.
_values: Optional dict of property values, used when a BlobKey is given.
"""
if isinstance(entity_or_blob_key, datastore.Entity):
self.__entity = entity_or_blob_key
self.__key = BlobKey(entity_or_blob_key.key().name())
elif isinstance(entity_or_blob_key, BlobKey):
self.__entity = _values
self.__key = entity_or_blob_key
else:
raise TypeError('Must provide Entity or BlobKey')
@classmethod
def from_entity(cls, entity):
"""Convert entity to BlobInfo.
This method is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
return BlobInfo(entity)
@classmethod
def properties(cls):
"""Set of properties that belong to BlobInfo.
This method is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
return set(('content_type', 'creation', 'filename', 'size'))
def __get_value(self, name):
"""Get a BlobInfo value, loading entity if necessary.
This method allows lazy loading of the underlying datastore entity. It
should never be invoked directly.
Args:
name: Name of property to get value for.
Returns:
Value of BlobInfo property from entity.
"""
if self.__entity is None:
self.__entity = datastore.Get(
datastore_types.Key.from_path(
self.kind(), str(self.__key), namespace=''))
try:
return self.__entity[name]
except KeyError:
raise AttributeError(name)
def key(self):
"""Get key for blob.
Returns:
BlobKey instance that identifies this blob.
"""
return self.__key
def delete(self):
"""Permanently delete blob from Blobstore."""
delete(self.key())
@classmethod
def get(cls, blob_keys):
"""Retrieve BlobInfo by key or list of keys.
Args:
blob_keys: A key or a list of keys. Keys may be instances of str,
unicode and BlobKey.
Returns:
A BlobInfo instance associated with provided key or a list of BlobInfo
instances if a list of keys was provided. Keys that are not found in
Blobstore return None as their values.
"""
blob_keys = cls.__normalize_and_convert_keys(blob_keys)
try:
entities = datastore.Get(blob_keys)
except datastore_errors.EntityNotFoundError:
return None
if isinstance(entities, datastore.Entity):
return BlobInfo(entities)
else:
references = []
for entity in entities:
if entity is not None:
references.append(BlobInfo(entity))
else:
references.append(None)
return references
@classmethod
def all(cls):
"""Get query for all Blobs associated with application.
Returns:
A db.Query object querying over BlobInfo's datastore kind.
"""
return db.Query(model_class=cls, namespace='')
@classmethod
def __factory_for_kind(cls, kind):
if kind == BLOB_INFO_KIND:
return BlobInfo
raise ValueError('Cannot query for kind %s' % kind)
@classmethod
def gql(cls, query_string, *args, **kwds):
"""Returns a query using GQL query string.
See appengine/ext/gql for more information about GQL.
Args:
query_string: Properly formatted GQL query string with the
'SELECT * FROM <entity>' part omitted
*args: rest of the positional arguments used to bind numeric references
in the query.
**kwds: dictionary-based arguments (for named parameters).
Returns:
A gql.GqlQuery object querying over BlobInfo's datastore kind.
"""
return _GqlQuery('SELECT * FROM %s %s'
% (cls.kind(), query_string),
cls,
*args,
**kwds)
@classmethod
def kind(cls):
"""Get the entity kind for the BlobInfo.
This method is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
return BLOB_INFO_KIND
@classmethod
def __normalize_and_convert_keys(cls, keys):
"""Normalize and convert all keys to BlobKey type.
This method is based on datastore.NormalizeAndTypeCheck().
Args:
keys: A single key or a list/tuple of keys. Keys may be a string
or BlobKey
Returns:
Single key or list with all strings replaced by BlobKey instances.
"""
if isinstance(keys, (list, tuple)):
multiple = True
keys = list(keys)
else:
multiple = False
keys = [keys]
for index, key in enumerate(keys):
if not isinstance(key, (basestring, BlobKey)):
raise datastore_errors.BadArgumentError(
'Expected str or BlobKey; received %s (a %s)' % (
key,
datastore.typename(key)))
keys[index] = datastore.Key.from_path(cls.kind(), str(key), namespace='')
if multiple:
return keys
else:
return keys[0]
def get(blob_key):
"""Get a BlobInfo record from blobstore.
Does the same as BlobInfo.get.
"""
return BlobInfo.get(blob_key)
def parse_blob_info(field_storage):
"""Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
"""
if field_storage is None:
return None
field_name = field_storage.name
def get_value(params, name):
value = params.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key = BlobKey(get_value(field_storage.type_options, 'blob-key'))
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError, err:
raise BlobInfoParseError(str(err))
return BlobInfo(blob_key,
{'content_type': content_type,
'creation': creation,
'filename': filename,
'size': size,
})
class BlobReferenceProperty(db.Property):
"""Property compatible with db.Model classes.
Add references to blobs to domain models using BlobReferenceProperty:
class Picture(db.Model):
title = db.StringProperty()
image = blobstore.BlobReferenceProperty()
thumbnail = blobstore.BlobReferenceProperty()
To find the size of a picture using this model:
picture = Picture.get(picture_key)
print picture.image.size
BlobInfo objects are lazily loaded, so iterating over models for their
BlobKeys is efficient; the following does not need to hit the Datastore
for each image key:
list_of_untitled_blobs = []
for picture in Picture.gql("WHERE title=''"):
list_of_untitled_blobs.append(picture.image.key())
"""
data_type = BlobInfo
def get_value_for_datastore(self, model_instance):
"""Translate model property to datastore value."""
blob_info = getattr(model_instance, self.name)
if blob_info is None:
return None
return blob_info.key()
def make_value_from_datastore(self, value):
"""Translate datastore value to BlobInfo."""
if value is None:
return None
return BlobInfo(value)
def validate(self, value):
"""Validate that assigned value is BlobInfo.
Automatically converts from strings and BlobKey instances.
"""
if isinstance(value, (basestring)):
value = BlobInfo(BlobKey(value))
elif isinstance(value, BlobKey):
value = BlobInfo(value)
return super(BlobReferenceProperty, self).validate(value)
def fetch_data(blob, start_index, end_index):
"""Fetch data for blob.
Fetches a fragment of a blob up to MAX_BLOB_FETCH_SIZE in length. Attempting
to fetch a fragment that extends beyond the boundaries of the blob will return
the amount of data from start_index until the end of the blob, which will be
a smaller size than requested. Requesting a fragment which is entirely
outside the boundaries of the blob will return an empty string. Attempting
to fetch a negative index will raise an exception.
Args:
blob: BlobInfo, BlobKey, str or unicode representation of BlobKey of
blob to fetch data from.
start_index: Start index of blob data to fetch. May not be negative.
end_index: End index (inclusive) of blob data to fetch. Must be
>= start_index.
Returns:
str containing partial data of blob. If the indexes are legal but outside
the boundaries of the blob, will return an empty string.
Raises:
TypeError if start_index or end_index are not indexes. Also when blob
is not a string, BlobKey or BlobInfo.
DataIndexOutOfRangeError when start_index < 0 or end_index < start_index.
BlobFetchSizeTooLargeError when the requested blob fragment is larger than
MAX_BLOB_FETCH_SIZE.
BlobNotFoundError when blob does not exist.
"""
if isinstance(blob, BlobInfo):
blob = blob.key()
return blobstore.fetch_data(blob, start_index, end_index)
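# Added sketch: fetch the first kilobyte of a blob. The key value is
# hypothetical, and end_index is inclusive, hence the 1023.
def _demo_fetch_prefix(blob_key_str):
  return fetch_data(BlobKey(blob_key_str), 0, 1023)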
class BlobReader(object):
"""Provides a read-only file-like interface to a blobstore blob."""
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def __init__(self, blob_key, buffer_size=131072, position=0):
"""Constructor.
Args:
blob_key: The blob key or string blob key to read from.
buffer_size: The minimum size to fetch chunks of data from blobstore.
position: The initial position in the file.
"""
self.__blob_key = blob_key
self.__buffer_size = buffer_size
self.__buffer = ""
self.__position = position
self.__buffer_position = 0
self.__eof = False
self.__blob_info = None
def __iter__(self):
"""Returns a file iterator for this BlobReader."""
return self
def __getstate__(self):
"""Returns the serialized state for this BlobReader."""
return (self.__blob_key, self.__buffer_size, self.__position)
def __setstate__(self, state):
"""Restores pickled state for this BlobReader."""
self.__init__(*state)
def close(self):
"""Close the file.
A closed file cannot be read or written any more. Any operation which
requires that the file be open will raise a ValueError after the file has
been closed. Calling close() more than once is allowed.
"""
self.__blob_key = None
def flush(self):
raise IOError("BlobReaders are read-only")
def next(self):
"""Returns the next line from the file.
Returns:
A string, terminated by \n. The last line may not be terminated by \n.
Raises:
StopIteration: When the end of the file is reached.
"""
line = self.readline()
if not line:
raise StopIteration
return line
def __read_from_buffer(self, size):
"""Reads at most size bytes from the buffer.
Args:
size: Number of bytes to read, or negative to read the entire buffer.
Returns:
Tuple (data, size):
data: The bytes read from the buffer.
size: The remaining unread byte count.
"""
if not self.__blob_key:
raise ValueError("File is closed")
if size < 0:
end_pos = len(self.__buffer)
else:
end_pos = self.__buffer_position + size
data = self.__buffer[self.__buffer_position:end_pos]
data_length = len(data)
size -= data_length
self.__position += data_length
self.__buffer_position += data_length
if self.__buffer_position == len(self.__buffer):
self.__buffer = ""
self.__buffer_position = 0
return data, size
def __fill_buffer(self, size=0):
"""Fills the internal buffer.
Args:
size: Number of bytes to read. Will be clamped to
[self.__buffer_size, MAX_BLOB_FETCH_SIZE].
"""
read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
self.__buffer = fetch_data(self.__blob_key, self.__position,
self.__position + read_size - 1)
self.__buffer_position = 0
self.__eof = len(self.__buffer) < read_size
def read(self, size=-1):
"""Read at most size bytes from the file.
Fewer bytes are read if the read hits EOF before obtaining size bytes.
If the size argument is negative or omitted, read all data until EOF is
reached. The bytes are returned as a string object. An empty string is
returned when EOF is encountered immediately.
Calling read() without a size specified is likely to be dangerous, as it
may read excessive amounts of data.
Args:
size: Optional. The maximum number of bytes to read. When omitted, read()
returns all remaining data in the file.
Returns:
The read data, as a string.
"""
data_list = []
while True:
data, size = self.__read_from_buffer(size)
data_list.append(data)
if size == 0 or self.__eof:
return ''.join(data_list)
self.__fill_buffer(size)
def readline(self, size=-1):
"""Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent when a
file ends with an incomplete line). If the size argument is present and
non-negative, it is a maximum byte count (including the trailing newline)
and an incomplete line may be returned. An empty string is returned only
when EOF is encountered immediately.
Args:
size: Optional. The maximum number of bytes to read.
Returns:
The read data, as a string.
"""
data_list = []
while True:
if size < 0:
end_pos = len(self.__buffer)
else:
end_pos = self.__buffer_position + size
newline_pos = self.__buffer.find('\n', self.__buffer_position, end_pos)
if newline_pos != -1:
data_list.append(
self.__read_from_buffer(newline_pos
- self.__buffer_position + 1)[0])
break
else:
data, size = self.__read_from_buffer(size)
data_list.append(data)
if size == 0 or self.__eof:
break
self.__fill_buffer()
return ''.join(data_list)
def readlines(self, sizehint=None):
"""Read until EOF using readline() and return a list of lines thus read.
If the optional sizehint argument is present, instead of reading up to EOF,
whole lines totalling approximately sizehint bytes (possibly after rounding
up to an internal buffer size) are read.
Args:
sizehint: A hint as to the maximum number of bytes to read.
Returns:
A list of strings, each being a single line from the file.
"""
lines = []
while sizehint is None or sizehint > 0:
line = self.readline()
if sizehint:
sizehint -= len(line)
if not line:
break
lines.append(line)
return lines
def seek(self, offset, whence=SEEK_SET):
"""Set the file's current position, like stdio's fseek().
The whence argument is optional and defaults to os.SEEK_SET or 0 (absolute
file positioning); other values are os.SEEK_CUR or 1 (seek relative to the
current position) and os.SEEK_END or 2 (seek relative to the file's end).
Args:
offset: The relative offset to seek to.
whence: Defines what the offset is relative to. See description for
details.
"""
if whence == BlobReader.SEEK_CUR:
offset = self.__position + offset
elif whence == BlobReader.SEEK_END:
offset = self.blob_info.size + offset
self.__buffer = ""
self.__buffer_position = 0
self.__position = offset
self.__eof = False
def tell(self):
"""Return the file's current position, like stdio's ftell()."""
return self.__position
def truncate(self, size):
raise IOError("BlobReaders are read-only")
def write(self, data):
raise IOError("BlobReaders are read-only")
def writelines(self, sequence):
raise IOError("BlobReaders are read-only")
@property
def blob_info(self):
"""Returns the BlobInfo for this file."""
if not self.__blob_info:
self.__blob_info = BlobInfo.get(self.__blob_key)
return self.__blob_info
@property
def closed(self):
"""Returns True if this file is closed, False otherwise."""
return self.__blob_key is None
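# Added usage sketch: BlobReader is file-like, so the usual iteration idioms
# apply; iteration goes through next()/readline(). The key is hypothetical.
def _demo_print_lines(blob_key_str):
  reader = BlobReader(blob_key_str, buffer_size=1024)
  for line in reader:
    print line.rstrip('\n')
  reader.close()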
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A Python blobstore API used by app developers.
Contains methods used to interface with Blobstore API. Includes db.Model-like
class representing a reference to a very large BLOB. Imports db.Key-like
class representing a blob-key.
"""
import cgi
import email
import os
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api.blobstore import blobstore
from google.appengine.ext import db
__all__ = ['BLOB_INFO_KIND',
'BLOB_KEY_HEADER',
'BLOB_RANGE_HEADER',
'BlobFetchSizeTooLargeError',
'BlobInfo',
'BlobInfoParseError',
'BlobKey',
'BlobNotFoundError',
'BlobReferenceProperty',
'BlobReader',
'DataIndexOutOfRangeError',
'Error',
'InternalError',
'MAX_BLOB_FETCH_SIZE',
'UPLOAD_INFO_CREATION_HEADER',
'create_upload_url',
'delete',
'fetch_data',
'get',
'parse_blob_info']
Error = blobstore.Error
InternalError = blobstore.InternalError
BlobFetchSizeTooLargeError = blobstore.BlobFetchSizeTooLargeError
BlobNotFoundError = blobstore.BlobNotFoundError
_CreationFormatError = blobstore._CreationFormatError
DataIndexOutOfRangeError = blobstore.DataIndexOutOfRangeError
BlobKey = blobstore.BlobKey
create_upload_url = blobstore.create_upload_url
delete = blobstore.delete
class BlobInfoParseError(Error):
"""CGI parameter does not contain valid BlobInfo record."""
BLOB_INFO_KIND = blobstore.BLOB_INFO_KIND
BLOB_KEY_HEADER = blobstore.BLOB_KEY_HEADER
BLOB_RANGE_HEADER = blobstore.BLOB_RANGE_HEADER
MAX_BLOB_FETCH_SIZE = blobstore.MAX_BLOB_FETCH_SIZE
UPLOAD_INFO_CREATION_HEADER = blobstore.UPLOAD_INFO_CREATION_HEADER
class _GqlQuery(db.GqlQuery):
"""GqlQuery class that explicitly sets model-class.
This does the same as the original db.GqlQuery class except that it does
not try to find the model class based on the compiled GQL query. The
caller instead provides the query with a model class to use for construction.
This class is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
def __init__(self, query_string, model_class, *args, **kwds):
"""Constructor.
Args:
query_string: Properly formatted GQL query string.
model_class: Model class from which entities are constructed.
*args: Positional arguments used to bind numeric references in the query.
**kwds: Dictionary-based arguments for named references.
"""
from google.appengine.ext import gql
app = kwds.pop('_app', None)
self._proto_query = gql.GQL(query_string, _app=app, namespace='')
super(db.GqlQuery, self).__init__(model_class, namespace='')
self.bind(*args, **kwds)
class BlobInfo(object):
"""Information about blobs in Blobstore.
This is a db.Model-like class that contains information about blobs stored
by an application. Like db.Model, this class is backed by an Datastore
entity, however, BlobInfo instances are read-only and have a much more
limited interface.
Each BlobInfo has a key of type BlobKey associated with it. This key is
specific to the Blobstore API and is not compatible with db.get. The key
can be used for quick lookup by passing it to BlobInfo.get. This
key converts easily to a string, which is web safe and can be embedded
in URLs.
Properties:
content_type: Content type of blob.
creation: Creation date of blob, when it was uploaded.
filename: Filename user selected from their machine.
size: Size of uncompressed blob.
All properties are read-only. Attempting to assign a value to a property
will raise NotImplementedError.
"""
_unindexed_properties = frozenset()
@property
def content_type(self):
return self.__get_value('content_type')
@property
def creation(self):
return self.__get_value('creation')
@property
def filename(self):
return self.__get_value('filename')
@property
def size(self):
return self.__get_value('size')
def __init__(self, entity_or_blob_key, _values=None):
"""Constructor for wrapping blobstore entity.
The constructor should not be used outside this package and tests.
Args:
entity: Datastore entity that represents the blob reference.
"""
if isinstance(entity_or_blob_key, datastore.Entity):
self.__entity = entity_or_blob_key
self.__key = BlobKey(entity_or_blob_key.key().name())
elif isinstance(entity_or_blob_key, BlobKey):
self.__entity = _values
self.__key = entity_or_blob_key
else:
TypeError('Must provide Entity or BlobKey')
@classmethod
def from_entity(cls, entity):
"""Convert entity to BlobInfo.
This method is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
return BlobInfo(entity)
@classmethod
def properties(cls):
"""Set of properties that belong to BlobInfo.
This method is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
return set(('content_type', 'creation', 'filename', 'size'))
def __get_value(self, name):
"""Get a BlobInfo value, loading entity if necessary.
This method allows lazy loading of the underlying datastore entity. It
should never be invoked directly.
Args:
name: Name of property to get value for.
Returns:
Value of BlobInfo property from entity.
"""
if self.__entity is None:
self.__entity = datastore.Get(
datastore_types.Key.from_path(
self.kind(), str(self.__key), namespace=''))
try:
return self.__entity[name]
except KeyError:
raise AttributeError(name)
def key(self):
"""Get key for blob.
Returns:
BlobKey instance that identifies this blob.
"""
return self.__key
def delete(self):
"""Permanently delete blob from Blobstore."""
delete(self.key())
@classmethod
def get(cls, blob_keys):
"""Retrieve BlobInfo by key or list of keys.
Args:
blob_keys: A key or a list of keys. Keys may be instances of str,
unicode and BlobKey.
Returns:
A BlobInfo instance associated with provided key or a list of BlobInfo
instances if a list of keys was provided. Keys that are not found in
Blobstore return None as their values.
"""
blob_keys = cls.__normalize_and_convert_keys(blob_keys)
try:
entities = datastore.Get(blob_keys)
except datastore_errors.EntityNotFoundError:
return None
if isinstance(entities, datastore.Entity):
return BlobInfo(entities)
else:
references = []
for entity in entities:
if entity is not None:
references.append(BlobInfo(entity))
else:
references.append(None)
return references
@classmethod
def all(cls):
"""Get query for all Blobs associated with application.
Returns:
A db.Query object querying over BlobInfo's datastore kind.
"""
return db.Query(model_class=cls, namespace='')
@classmethod
def __factory_for_kind(cls, kind):
if kind == BLOB_INFO_KIND:
return BlobInfo
raise ValueError('Cannot query for kind %s' % kind)
@classmethod
def gql(cls, query_string, *args, **kwds):
"""Returns a query using GQL query string.
See appengine/ext/gql for more information about GQL.
Args:
query_string: Properly formatted GQL query string with the
'SELECT * FROM <entity>' part omitted
*args: rest of the positional arguments used to bind numeric references
in the query.
**kwds: dictionary-based arguments (for named parameters).
Returns:
A gql.GqlQuery object querying over BlobInfo's datastore kind.
"""
return _GqlQuery('SELECT * FROM %s %s'
% (cls.kind(), query_string),
cls,
*args,
**kwds)
@classmethod
def kind(self):
"""Get the entity kind for the BlobInfo.
This method is required for compatibility with the current db.py query
mechanism but will be removed in the future. DO NOT USE.
"""
return BLOB_INFO_KIND
@classmethod
def __normalize_and_convert_keys(cls, keys):
"""Normalize and convert all keys to BlobKey type.
This method is based on datastore.NormalizeAndTypeCheck().
Args:
keys: A single key or a list/tuple of keys. Keys may be a string
or BlobKey
Returns:
Single key or list with all strings replaced by BlobKey instances.
"""
if isinstance(keys, (list, tuple)):
multiple = True
keys = list(keys)
else:
multiple = False
keys = [keys]
for index, key in enumerate(keys):
if not isinstance(key, (basestring, BlobKey)):
raise datastore_errors.BadArgumentError(
'Expected str or BlobKey; received %s (a %s)' % (
key,
datastore.typename(key)))
keys[index] = datastore.Key.from_path(cls.kind(), str(key), namespace='')
if multiple:
return keys
else:
return keys[0]
def get(blob_key):
"""Get a BlobInfo record from blobstore.
Does the same as BlobInfo.get.
"""
return BlobInfo.get(blob_key)
def parse_blob_info(field_storage):
"""Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
"""
if field_storage is None:
return None
field_name = field_storage.name
def get_value(dict, name):
value = dict.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key = BlobKey(get_value(field_storage.type_options, 'blob-key'))
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError, err:
raise BlobInfoParseError(str(err))
return BlobInfo(blob_key,
{'content_type': content_type,
'creation': creation,
'filename': filename,
'size': size,
})
class BlobReferenceProperty(db.Property):
"""Property compatible with db.Model classes.
Add references to blobs to domain models using BlobReferenceProperty:
class Picture(db.Model):
title = db.StringProperty()
image = blobstore.BlobReferenceProperty()
thumbnail = blobstore.BlobReferenceProperty()
To find the size of a picture using this model:
picture = Picture.get(picture_key)
print picture.image.size
BlobInfo objects are lazily loaded so iterating over models with
for BlobKeys is efficient, the following does not need to hit
Datastore for each image key:
list_of_untitled_blobs = []
for picture in Picture.gql("WHERE title=''"):
list_of_untitled_blobs.append(picture.image.key())
"""
data_type = BlobInfo
def get_value_for_datastore(self, model_instance):
"""Translate model property to datastore value."""
blob_info = getattr(model_instance, self.name)
if blob_info is None:
return None
return blob_info.key()
def make_value_from_datastore(self, value):
"""Translate datastore value to BlobInfo."""
if value is None:
return None
return BlobInfo(value)
def validate(self, value):
"""Validate that assigned value is BlobInfo.
Automatically converts from strings and BlobKey instances.
"""
if isinstance(value, (basestring)):
value = BlobInfo(BlobKey(value))
elif isinstance(value, BlobKey):
value = BlobInfo(value)
return super(BlobReferenceProperty, self).validate(value)
def fetch_data(blob, start_index, end_index):
"""Fetch data for blob.
Fetches a fragment of a blob up to MAX_BLOB_FETCH_SIZE in length. Attempting
to fetch a fragment that extends beyond the boundaries of the blob will return
the amount of data from start_index until the end of the blob, which will be
a smaller size than requested. Requesting a fragment which is entirely
outside the boundaries of the blob will return empty string. Attempting
to fetch a negative index will raise an exception.
Args:
blob: BlobInfo, BlobKey, str or unicode representation of BlobKey of
blob to fetch data from.
start_index: Start index of blob data to fetch. May not be negative.
end_index: End index (inclusive) of blob data to fetch. Must be
>= start_index.
Returns:
str containing partial data of blob. If the indexes are legal but outside
the boundaries of the blob, will return empty string.
Raises:
TypeError if start_index or end_index are not indexes. Also when blob
is not a string, BlobKey or BlobInfo.
DataIndexOutOfRangeError when start_index < 0 or end_index < start_index.
BlobFetchSizeTooLargeError when request blob fragment is larger than
MAX_BLOB_FETCH_SIZE.
BlobNotFoundError when blob does not exist.
"""
if isinstance(blob, BlobInfo):
blob = blob.key()
return blobstore.fetch_data(blob, start_index, end_index)
class BlobReader(object):
"""Provides a read-only file-like interface to a blobstore blob."""
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def __init__(self, blob_key, buffer_size=131072, position=0):
"""Constructor.
Args:
blob_key: The blob key or string blob key to read from.
buffer_size: The minimum size to fetch chunks of data from blobstore.
position: The initial position in the file.
"""
self.__blob_key = blob_key
self.__buffer_size = buffer_size
self.__buffer = ""
self.__position = position
self.__buffer_position = 0
self.__eof = False
self.__blob_info = None
def __iter__(self):
"""Returns a file iterator for this BlobReader."""
return self
def __getstate__(self):
"""Returns the serialized state for this BlobReader."""
return (self.__blob_key, self.__buffer_size, self.__position)
def __setstate__(self, state):
"""Restores pickled state for this BlobReader."""
self.__init__(*state)
def close(self):
"""Close the file.
A closed file cannot be read or written any more. Any operation which
requires that the file be open will raise a ValueError after the file has
been closed. Calling close() more than once is allowed.
"""
self.__blob_key = None
def flush(self):
raise IOError("BlobReaders are read-only")
def next(self):
"""Returns the next line from the file.
Returns:
A string, terminated by \n. The last line may not be terminated by \n.
If EOF is reached, an empty string will be returned.
"""
line = self.readline()
if not line:
raise StopIteration
return line
def __read_from_buffer(self, size):
"""Reads at most size bytes from the buffer.
Args:
size: Number of bytes to read, or negative to read the entire buffer.
Returns:
Tuple (data, size):
data: The bytes read from the buffer.
size: The remaining unread byte count.
"""
if not self.__blob_key:
raise ValueError("File is closed")
if size < 0:
end_pos = len(self.__buffer)
else:
end_pos = self.__buffer_position + size
data = self.__buffer[self.__buffer_position:end_pos]
data_length = len(data)
size -= data_length
self.__position += data_length
self.__buffer_position += data_length
if self.__buffer_position == len(self.__buffer):
self.__buffer = ""
self.__buffer_position = 0
return data, size
def __fill_buffer(self, size=0):
"""Fills the internal buffer.
Args:
size: Number of bytes to read. Will be clamped to
[self.__buffer_size, MAX_BLOB_FETCH_SIZE].
"""
read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
self.__buffer = fetch_data(self.__blob_key, self.__position,
self.__position + read_size - 1)
self.__buffer_position = 0
self.__eof = len(self.__buffer) < read_size
def read(self, size=-1):
"""Read at most size bytes from the file.
Fewer bytes are read if the read hits EOF before obtaining size bytes.
If the size argument is negative or omitted, read all data until EOF is
reached. The bytes are returned as a string object. An empty string is
returned when EOF is encountered immediately.
Calling read() without a size specified is likely to be dangerous, as it
may read excessive amounts of data.
Args:
size: Optional. The maximum number of bytes to read. When omitted, read()
returns all remaining data in the file.
Returns:
The read data, as a string.
"""
data_list = []
while True:
data, size = self.__read_from_buffer(size)
data_list.append(data)
if size == 0 or self.__eof:
return ''.join(data_list)
self.__fill_buffer(size)
def readline(self, size=-1):
"""Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent when a
file ends with an incomplete line). If the size argument is present and
non-negative, it is a maximum byte count (including the trailing newline)
and an incomplete line may be returned. An empty string is returned only
when EOF is encountered immediately.
Args:
size: Optional. The maximum number of bytes to read.
Returns:
The read data, as a string.
"""
data_list = []
while True:
if size < 0:
end_pos = len(self.__buffer)
else:
end_pos = self.__buffer_position + size
newline_pos = self.__buffer.find('\n', self.__buffer_position, end_pos)
if newline_pos != -1:
data_list.append(
self.__read_from_buffer(newline_pos
- self.__buffer_position + 1)[0])
break
else:
data, size = self.__read_from_buffer(size)
data_list.append(data)
if size == 0 or self.__eof:
break
self.__fill_buffer()
return ''.join(data_list)
def readlines(self, sizehint=None):
"""Read until EOF using readline() and return a list of lines thus read.
If the optional sizehint argument is present, instead of reading up to EOF,
whole lines totalling approximately sizehint bytes (possibly after rounding
up to an internal buffer size) are read.
Args:
sizehint: A hint as to the maximum number of bytes to read.
Returns:
A list of strings, each being a single line from the file.
"""
lines = []
while sizehint is None or sizehint > 0:
line = self.readline()
if sizehint:
sizehint -= len(line)
if not line:
break
lines.append(line)
return lines
def seek(self, offset, whence=SEEK_SET):
"""Set the file's current position, like stdio's fseek().
The whence argument is optional and defaults to os.SEEK_SET or 0 (absolute
file positioning); other values are os.SEEK_CUR or 1 (seek relative to the
current position) and os.SEEK_END or 2 (seek relative to the file's end).
Args:
offset: The relative offset to seek to.
whence: Defines what the offset is relative to. See description for
details.
"""
if whence == BlobReader.SEEK_CUR:
offset = self.__position + offset
elif whence == BlobReader.SEEK_END:
offset = self.blob_info.size + offset
self.__buffer = ""
self.__buffer_position = 0
self.__position = offset
self.__eof = False
def tell(self):
"""Return the file's current position, like stdio's ftell()."""
return self.__position
def truncate(self, size):
raise IOError("BlobReaders are read-only")
def write(self, str):
raise IOError("BlobReaders are read-only")
def writelines(self, sequence):
raise IOError("BlobReaders are read-only")
@property
def blob_info(self):
"""Returns the BlobInfo for this file."""
if not self.__blob_info:
self.__blob_info = BlobInfo.get(self.__blob_key)
return self.__blob_info
@property
def closed(self):
"""Returns True if this file is closed, False otherwise."""
return self.__blob_key is None
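# Illustrative sketch, not part of the original module: BlobReader used as a
# read-only file object. The blob key is hypothetical and the calls need a
# live blobstore backend.
def _demo_blob_reader():
  reader = BlobReader('hypothetical-blob-key', buffer_size=4096)
  first_line = reader.readline()  # reads up to and including the first '\n'
  reader.seek(0)                  # rewind, like stdio's fseek()
  assert reader.tell() == 0
  lines = reader.readlines(8192)  # whole lines totalling roughly 8KB
  reader.close()
  return first_line, lines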
| Python |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Blobstore API module."""
from blobstore import *
| Python |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| Python |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses a (possibly invalid) XML or HTML document into a
tree representation. It provides methods and Pythonic idioms that make
it easy to navigate, search, and modify the tree.
A well-formed XML/HTML document yields a well-formed data
structure. An ill-formed XML/HTML document yields a correspondingly
ill-formed data structure. If your document is only locally
well-formed, you can use this library to find and process the
well-formed part of it.
Beautiful Soup works with Python 2.2 and up. It has no external
dependencies, but you'll have more success at converting data to UTF-8
if you also install these three packages:
* chardet, for auto-detecting character encodings
http://chardet.feedparser.org/
* cjkcodecs and iconv_codec, which add more encodings to the ones supported
by stock Python.
http://cjkpython.i18n.org/
Beautiful Soup defines classes for two main parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid. This class has web browser-like heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup also defines a class (UnicodeDammit) for autodetecting
the encoding of an HTML or XML document, and converting it to
Unicode. Much of this code is taken from Mark Pilgrim's Universal Feed Parser.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/documentation.html
Here, have some legalese:
Copyright (c) 2004-2010, Leonard Richardson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the Beautiful Soup Consortium and All
Night Kosher Bakery nor the names of its contributors may be
used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE, DAMMIT.
"""
from __future__ import generators
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "3.0.8.1"
__copyright__ = "Copyright (c) 2004-2010 Leonard Richardson"
__license__ = "New-style BSD"
from sgmllib import SGMLParser, SGMLParseError
import codecs
import markupbase
import types
import re
import sgmllib
try:
from htmlentitydefs import name2codepoint
except ImportError:
name2codepoint = {}
try:
set
except NameError:
from sets import Set as set
#These hacks make Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
markupbase._declname_match = re.compile(r'[a-zA-Z][-_.:a-zA-Z0-9]*\s*').match
DEFAULT_OUTPUT_ENCODING = "utf-8"
def _match_css_class(str):
"""Build a RE to match the given CSS class."""
return re.compile(r"(^|.*\s)%s($|\s)" % str)
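# Illustrative sketch, not part of the original module: the regular
# expression built by _match_css_class matches a class attribute value that
# contains the given class as a whitespace-separated token.
def _demo_match_css_class():
    pattern = _match_css_class("story")
    assert pattern.match("story")             # the only class
    assert pattern.match("lead story extra")  # one token among several
    assert not pattern.match("storyline")     # substring only: no match
    return pattern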
# First, the classes that represent markup elements.
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=None, previous=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = None
self.previousSibling = None
self.nextSibling = None
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def replaceWith(self, replaceWith):
oldParent = self.parent
myIndex = self.parent.index(self)
if hasattr(replaceWith, "parent")\
and replaceWith.parent is self.parent:
# We're replacing this element with one of its siblings.
index = replaceWith.parent.index(replaceWith)
if index and index < myIndex:
# Furthermore, it comes before this element. That
# means that when we extract it, the index of this
# element will change.
myIndex = myIndex - 1
self.extract()
oldParent.insert(myIndex, replaceWith)
def replaceWithChildren(self):
myParent = self.parent
myIndex = self.parent.index(self)
self.extract()
reversedChildren = list(self.contents)
reversedChildren.reverse()
for child in reversedChildren:
myParent.insert(myIndex, child)
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent:
try:
del self.parent.contents[self.parent.index(self)]
except ValueError:
pass
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
lastChild = self._lastRecursiveChild()
nextElement = lastChild.next
if self.previous:
self.previous.next = nextElement
if nextElement:
nextElement.previous = self.previous
self.previous = None
lastChild.next = None
self.parent = None
if self.previousSibling:
self.previousSibling.nextSibling = self.nextSibling
if self.nextSibling:
self.nextSibling.previousSibling = self.previousSibling
self.previousSibling = self.nextSibling = None
return self
def _lastRecursiveChild(self):
"Finds the last element beneath this object to be parsed."
lastChild = self
while hasattr(lastChild, 'contents') and lastChild.contents:
lastChild = lastChild.contents[-1]
return lastChild
def insert(self, position, newChild):
if isinstance(newChild, basestring) \
and not isinstance(newChild, NavigableString):
newChild = NavigableString(newChild)
position = min(position, len(self.contents))
if hasattr(newChild, 'parent') and newChild.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if newChild.parent is self:
index = self.index(newChild)
if index > position:
# Furthermore we're moving it further down the
# list of this object's children. That means that
# when we extract this element, our target index
# will jump down one.
position = position - 1
newChild.extract()
newChild.parent = self
previousChild = None
if position == 0:
newChild.previousSibling = None
newChild.previous = self
else:
previousChild = self.contents[position-1]
newChild.previousSibling = previousChild
newChild.previousSibling.nextSibling = newChild
newChild.previous = previousChild._lastRecursiveChild()
if newChild.previous:
newChild.previous.next = newChild
newChildsLastElement = newChild._lastRecursiveChild()
if position >= len(self.contents):
newChild.nextSibling = None
parent = self
parentsNextSibling = None
while not parentsNextSibling:
parentsNextSibling = parent.nextSibling
parent = parent.parent
if not parent: # This is the last element in the document.
break
if parentsNextSibling:
newChildsLastElement.next = parentsNextSibling
else:
newChildsLastElement.next = None
else:
nextChild = self.contents[position]
newChild.nextSibling = nextChild
if newChild.nextSibling:
newChild.nextSibling.previousSibling = newChild
newChildsLastElement.next = nextChild
if newChildsLastElement.next:
newChildsLastElement.next.previous = newChildsLastElement
self.contents.insert(position, newChild)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs)
def findAllNext(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.nextGenerator,
**kwargs)
def findNextSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._findOne(self.findNextSiblings, name, attrs, text,
**kwargs)
def findNextSiblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.nextSiblingGenerator, **kwargs)
fetchNextSiblings = findNextSiblings # Compatibility with pre-3.x
def findPrevious(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._findOne(self.findAllPrevious, name, attrs, text, **kwargs)
def findAllPrevious(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._findAll(name, attrs, text, limit, self.previousGenerator,
**kwargs)
fetchPrevious = findAllPrevious # Compatibility with pre-3.x
def findPreviousSibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._findOne(self.findPreviousSiblings, name, attrs, text,
**kwargs)
def findPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._findAll(name, attrs, text, limit,
self.previousSiblingGenerator, **kwargs)
fetchPreviousSiblings = findPreviousSiblings # Compatibility with pre-3.x
def findParent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _findOne because findParents takes a different
# set of arguments.
r = None
l = self.findParents(name, attrs, 1)
if l:
r = l[0]
return r
def findParents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._findAll(name, attrs, None, limit, self.parentGenerator,
**kwargs)
fetchParents = findParents # Compatibility with pre-3.x
#These methods do the real heavy lifting.
def _findOne(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _findAll(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
# (Possibly) special case some findAll*(...) searches
elif text is None and not limit and not attrs and not kwargs:
# findAll*(True)
if name is True:
return [element for element in generator()
if isinstance(element, Tag)]
# findAll*('tag-name')
elif isinstance(name, basestring):
return [element for element in generator()
if isinstance(element, Tag) and
element.name == name]
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
# Build a SoupStrainer
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
results = ResultSet(strainer)
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These Generators can be used to navigate starting from both
#NavigableStrings and Tags.
def nextGenerator(self):
i = self
while i is not None:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i is not None:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i is not None:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i is not None:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i is not None:
i = i.parent
yield i
# Utility methods
def substituteEncoding(self, str, encoding=None):
encoding = encoding or "utf-8"
return str.replace("%SOUP-ENCODING%", encoding)
def toEncoding(self, s, encoding=None):
"""Encodes an object to a string in some encoding, or to Unicode.
."""
if isinstance(s, unicode):
if encoding:
s = s.encode(encoding)
elif isinstance(s, str):
if encoding:
s = s.encode(encoding)
else:
s = unicode(s)
else:
if encoding:
s = self.toEncoding(str(s), encoding)
else:
s = unicode(s)
return s
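# Illustrative sketch, not part of the original module: the sibling and
# document-order navigation PageElement provides. BeautifulSoup is defined
# later in this file; the name resolves when the function is called.
def _demo_page_element_navigation():
    soup = BeautifulSoup("<p>one</p><p>two</p><p>three</p>")
    first = soup.find('p')
    assert first.findNextSibling('p').string == u'two'
    assert len(first.findAllNext('p')) == 2
    middle = first.findNextSibling('p')
    middle.extract()  # destructively removes <p>two</p> from the tree
    assert first.findNextSibling('p').string == u'three'
    return soup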
class NavigableString(unicode, PageElement):
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __getnewargs__(self):
return (NavigableString.__str__(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
def __unicode__(self):
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
if encoding:
return self.encode(encoding)
else:
return self
class CData(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<![CDATA[%s]]>" % NavigableString.__str__(self, encoding)
class ProcessingInstruction(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
output = self
if "%SOUP-ENCODING%" in output:
output = self.substituteEncoding(output, encoding)
return "<?%s?>" % self.toEncoding(output, encoding)
class Comment(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!--%s-->" % NavigableString.__str__(self, encoding)
class Declaration(NavigableString):
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING):
return "<!%s>" % NavigableString.__str__(self, encoding)
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def _invert(h):
"Cheap function to invert a hash."
i = {}
for k,v in h.items():
i[v] = k
return i
XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
"quot" : '"',
"amp" : "&",
"lt" : "<",
"gt" : ">" }
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
def _convertEntities(self, match):
"""Used in a call to re.sub to replace HTML, XML, and numeric
entities with the appropriate Unicode characters. If HTML
entities are being converted, any unrecognized entities are
escaped."""
x = match.group(1)
if self.convertHTMLEntities and x in name2codepoint:
return unichr(name2codepoint[x])
elif x in self.XML_ENTITIES_TO_SPECIAL_CHARS:
if self.convertXMLEntities:
return self.XML_ENTITIES_TO_SPECIAL_CHARS[x]
else:
return u'&%s;' % x
elif len(x) > 0 and x[0] == '#':
# Handle numeric entities
if len(x) > 1 and x[1] == 'x':
return unichr(int(x[2:], 16))
else:
return unichr(int(x[1:]))
elif self.escapeUnrecognizedEntities:
return u'&%s;' % x
else:
return u'&%s;' % x
def __init__(self, parser, name, attrs=None, parent=None,
previous=None):
"Basic constructor."
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected
self.parserClass = parser.__class__
self.isSelfClosing = parser.isSelfClosingTag(name)
self.name = name
if attrs is None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
self.containsSubstitutions = False
self.convertHTMLEntities = parser.convertHTMLEntities
self.convertXMLEntities = parser.convertXMLEntities
self.escapeUnrecognizedEntities = parser.escapeUnrecognizedEntities
# Convert any HTML, XML, or numeric entities in the attribute values.
convert = lambda(k, val): (k,
re.sub("&(#\d+|#x[0-9a-fA-F]+|\w+);",
self._convertEntities,
val))
self.attrs = map(convert, self.attrs)
def getString(self):
if (len(self.contents) == 1
and isinstance(self.contents[0], NavigableString)):
return self.contents[0]
def setString(self, string):
"""Replace the contents of the tag with a string"""
self.clear()
self.append(string)
string = property(getString, setString)
def getText(self, separator=u""):
if not len(self.contents):
return u""
stopNode = self._lastRecursiveChild().next
strings = []
current = self.contents[0]
while current is not stopNode:
if isinstance(current, NavigableString):
strings.append(current.strip())
current = current.next
return separator.join(strings)
text = property(getText)
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def clear(self):
"""Extract all children."""
for child in self.contents[:]:
child.extract()
def index(self, element):
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def has_key(self, key):
return self._getAttrMap().has_key(key)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
findAll() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.findAll, args, kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.find(tag[:-3])
elif tag.find('__') != 0:
return self.find(tag)
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__, tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
if other is self:
return True
if not hasattr(other, 'name') or not hasattr(other, 'attrs') or not hasattr(other, 'contents') or self.name != other.name or self.attrs != other.attrs or len(self) != len(other):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.__str__(encoding)
def __unicode__(self):
return self.__str__(None)
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
+ "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
+ ")")
def _sub_entity(self, x):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
return "&" + self.XML_SPECIAL_CHARS_TO_ENTITIES[x.group(0)[0]] + ";"
def __str__(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Returns a string or Unicode representation of this tag and
its contents. To get Unicode, pass None for encoding.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
encodedName = self.toEncoding(self.name, encoding)
attrs = []
if self.attrs:
for key, val in self.attrs:
fmt = '%s="%s"'
if isinstance(val, basestring):
if self.containsSubstitutions and '%SOUP-ENCODING%' in val:
val = self.substituteEncoding(val, encoding)
# The attribute value either:
#
# * Contains no embedded double quotes or single quotes.
# No problem: we enclose it in double quotes.
# * Contains embedded single quotes. No problem:
# double quotes work here too.
# * Contains embedded double quotes. No problem:
# we enclose it in single quotes.
# * Embeds both single _and_ double quotes. This
# can't happen naturally, but it can happen if
# you modify an attribute value after parsing
# the document. Now we have a bit of a
# problem. We solve it by enclosing the
# attribute in single quotes, and escaping any
# embedded single quotes to XML entities.
if '"' in val:
fmt = "%s='%s'"
if "'" in val:
# TODO: replace with apos when
# appropriate.
val = val.replace("'", "&squot;")
# Now we're okay w/r/t quotes. But the attribute
# value might also contain angle brackets, or
# ampersands that aren't part of entities. We need
# to escape those to XML entities too.
val = self.BARE_AMPERSAND_OR_BRACKET.sub(self._sub_entity, val)
attrs.append(fmt % (self.toEncoding(key, encoding),
self.toEncoding(val, encoding)))
close = ''
closeTag = ''
if self.isSelfClosing:
close = ' /'
else:
closeTag = '</%s>' % encodedName
indentTag, indentContents = 0, 0
if prettyPrint:
indentTag = indentLevel
space = (' ' * (indentTag-1))
indentContents = indentTag + 1
contents = self.renderContents(encoding, prettyPrint, indentContents)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if prettyPrint:
s.append(space)
s.append('<%s%s%s>' % (encodedName, attributeString, close))
if prettyPrint:
s.append("\n")
s.append(contents)
if prettyPrint and contents and contents[-1] != "\n":
s.append("\n")
if prettyPrint and closeTag:
s.append(space)
s.append(closeTag)
if prettyPrint and closeTag and self.nextSibling:
s.append("\n")
s = ''.join(s)
return s
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
if len(self.contents) == 0:
return
current = self.contents[0]
while current is not None:
next = current.next
if isinstance(current, Tag):
del current.contents[:]
current.parent = None
current.previous = None
current.previousSibling = None
current.next = None
current.nextSibling = None
current = next
def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
return self.__str__(encoding, True)
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
"""Renders the contents of this tag as a string in the given
encoding. If encoding is None, returns a Unicode string."""
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.__str__(encoding)
elif isinstance(c, Tag):
s.append(c.__str__(encoding, prettyPrint, indentLevel))
if text and prettyPrint:
text = text.strip()
if text:
if prettyPrint:
s.append(" " * (indentLevel-1))
s.append(text)
if prettyPrint:
s.append("\n")
return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.findAll(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def findAll(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._findAll(name, attrs, text, limit, generator, **kwargs)
findChildren = findAll
# Pre-3.x compatibility methods
first = find
fetch = findAll
def fetchText(self, text=None, recursive=True, limit=None):
return self.findAll(text=text, recursive=recursive, limit=limit)
def firstText(self, text=None, recursive=True):
return self.find(text=text, recursive=recursive)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
if not getattr(self, 'attrMap', None):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
# Just use the iterator from the contents
return iter(self.contents)
def recursiveChildGenerator(self):
if not len(self.contents):
raise StopIteration
stopNode = self._lastRecursiveChild().next
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next
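# Illustrative sketch, not part of the original module: dictionary-style
# attribute access and searching on Tag. BeautifulSoup is defined later in
# this file; the name resolves when the function is called.
def _demo_tag_api():
    soup = BeautifulSoup('<div id="main"><a href="/one">one</a>'
                         '<a href="/two">two</a></div>')
    div = soup.find('div')
    assert div['id'] == u'main'    # __getitem__ reads an attribute
    div['class'] = 'wide'          # __setitem__ adds or replaces one
    links = div.findAll('a')       # equivalent to div('a') via __call__
    assert [a['href'] for a in links] == [u'/one', u'/two']
    return div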
# Next, a couple of classes to represent queries and their results.
class SoupStrainer:
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = name
if isinstance(attrs, basestring):
kwargs['class'] = _match_css_class(attrs)
attrs = None
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
self.attrs = attrs
self.text = text
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def searchTag(self, markupName=None, markupAttrs={}):
found = None
markup = None
if isinstance(markupName, Tag):
markup = markupName
markupAttrs = markup
callFunctionWithTagData = callable(self.name) \
and not isinstance(markupName, Tag)
if (not self.name) \
or callFunctionWithTagData \
or (markup and self._matches(markup, self.name)) \
or (not markup and self._matches(markupName, self.name)):
if callFunctionWithTagData:
match = self.name(markupName, markupAttrs)
else:
match = True
markupAttrMap = None
for attr, matchAgainst in self.attrs.items():
if not markupAttrMap:
if hasattr(markupAttrs, 'get'):
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
for k,v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
match = False
break
if match:
if markup:
found = markup
else:
found = markupName
return found
def search(self, markup):
#print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, "__iter__") \
and not isinstance(markup, Tag):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text:
found = self.searchTag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if self._matches(markup, self.text):
found = markup
else:
raise Exception, "I don't know how to match against a %s" \
% markup.__class__
return found
def _matches(self, markup, matchAgainst):
#print "Matching %s against %s" % (markup, matchAgainst)
result = False
if matchAgainst is True:
result = markup is not None
elif callable(matchAgainst):
result = matchAgainst(markup)
else:
#Custom match methods take the tag as an argument, but all
#other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
if markup and not isinstance(markup, basestring):
markup = unicode(markup)
#Now we know that chunk is either a string, or None.
if hasattr(matchAgainst, 'match'):
# It's a regexp object.
result = markup and matchAgainst.search(markup)
elif hasattr(matchAgainst, '__iter__'): # list-like
result = markup in matchAgainst
elif hasattr(matchAgainst, 'items'):
result = markup.has_key(matchAgainst)
elif matchAgainst and isinstance(markup, basestring):
if isinstance(markup, unicode):
matchAgainst = unicode(matchAgainst)
else:
matchAgainst = str(matchAgainst)
if not result:
result = matchAgainst == markup
return result
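# Illustrative sketch, not part of the original module: a SoupStrainer can
# be passed to the find* methods or, as parseOnlyThese, to the parser, so
# that non-matching elements are never turned into Tags at all.
# BeautifulSoup is defined later in this file.
def _demo_soup_strainer():
    only_links = SoupStrainer('a', href=re.compile('^https?:'))
    soup = BeautifulSoup('<a href="http://x">x</a><a href="/rel">r</a>',
                         parseOnlyThese=only_links)
    assert len(soup.findAll('a')) == 1  # the relative link was never built
    return soup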
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source):
list.__init__([])
self.source = source
# Now, some helper functions.
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS, NESTABLE_TAGS, and
NESTING_RESET_TAGS maps out of lists and partial maps."""
built = {}
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
for k,v in portion.items():
built[k] = v
elif hasattr(portion, '__iter__'): # is a list
#It's a list. Map each item to the default.
for k in portion:
built[k] = default
else:
#It's a scalar. Map it to the default.
built[portion] = default
return built
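# Illustrative sketch, not part of the original module: buildTagMap merges
# maps as-is and maps list items and scalars to the given default.
def _demo_build_tag_map():
    built = buildTagMap(None, ('br', 'hr'), {'li': ['ul', 'ol']}, 'img')
    assert built == {'br': None, 'hr': None, 'li': ['ul', 'ol'], 'img': None}
    return built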
# Now, the parser classes.
class BeautifulStoneSoup(Tag, SGMLParser):
"""This class contains the basic parser and search code. It defines
a parser that knows nothing about tag behavior except for the
following:
You can't close a tag without closing all the tags it encloses.
That is, "<foo><bar></foo>" actually means
"<foo><bar></bar></foo>".
[Another possible explanation is "<foo><bar /></foo>", but since
this class defines no SELF_CLOSING_TAGS, it will never use that
explanation.]
This class is useful for parsing XML or made-up markup languages,
or when BeautifulSoup makes an assumption counter to what you were
expecting."""
SELF_CLOSING_TAGS = {}
NESTABLE_TAGS = {}
RESET_NESTING_TAGS = {}
QUOTE_TAGS = {}
PRESERVE_WHITESPACE_TAGS = []
MARKUP_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda x: x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda x: '<!' + x.group(1) + '>')
]
ROOT_TAG_NAME = u'[document]'
HTML_ENTITIES = "html"
XML_ENTITIES = "xml"
XHTML_ENTITIES = "xhtml"
# TODO: This only exists for backwards-compatibility
ALL_ENTITIES = XHTML_ENTITIES
# Used when determining whether a text node is all whitespace and
# can be replaced with a single space. A text node that contains
# fancy Unicode spaces (usually non-breaking) should be left
# alone.
STRIP_ASCII_SPACES = { 9: None, 10: None, 12: None, 13: None, 32: None, }
def __init__(self, markup="", parseOnlyThese=None, fromEncoding=None,
markupMassage=True, smartQuotesTo=XML_ENTITIES,
convertEntities=None, selfClosingTags=None, isHTML=False):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser.
sgmllib will process most bad HTML, and the BeautifulSoup
class has some tricks for dealing with some HTML that kills
sgmllib, but Beautiful Soup can nonetheless choke or lose data
if your data uses self-closing tags or declarations
incorrectly.
By default, Beautiful Soup uses regexes to sanitize input,
avoiding the vast majority of these problems. If the problems
don't apply to you, pass in False for markupMassage, and
you'll get better performance.
The default parser massage techniques fix the two most common
instances of invalid HTML that choke sgmllib:
<br/> (No space between name of closing tag and tag close)
<! --Comment--> (Extraneous whitespace in declaration)
You can pass in a custom list of (RE object, replace method)
tuples to get Beautiful Soup to scrub your input the way you
want."""
self.parseOnlyThese = parseOnlyThese
self.fromEncoding = fromEncoding
self.smartQuotesTo = smartQuotesTo
self.convertEntities = convertEntities
# Set the rules for how we'll deal with the entities we
# encounter
if self.convertEntities:
# It doesn't make sense to convert encoded characters to
# entities even while you're converting entities to Unicode.
# Just convert it all to Unicode.
self.smartQuotesTo = None
if convertEntities == self.HTML_ENTITIES:
self.convertXMLEntities = False
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = True
elif convertEntities == self.XHTML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = True
self.escapeUnrecognizedEntities = False
elif convertEntities == self.XML_ENTITIES:
self.convertXMLEntities = True
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
else:
self.convertXMLEntities = False
self.convertHTMLEntities = False
self.escapeUnrecognizedEntities = False
self.instanceSelfClosingTags = buildTagMap(None, selfClosingTags)
SGMLParser.__init__(self)
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
self.markup = markup
self.markupMassage = markupMassage
try:
self._feed(isHTML=isHTML)
except StopParsing:
pass
self.markup = None # The markup can now be GCed
def convert_charref(self, name):
"""This method fixes a bug in Python's SGMLParser."""
try:
n = int(name)
except ValueError:
return
if not 0 <= n <= 127:  # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
def _feed(self, inDocumentEncoding=None, isHTML=False):
# Convert the document to Unicode.
markup = self.markup
if isinstance(markup, unicode):
if not hasattr(self, 'originalEncoding'):
self.originalEncoding = None
else:
dammit = UnicodeDammit\
(markup, [self.fromEncoding, inDocumentEncoding],
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
markup = dammit.unicode
self.originalEncoding = dammit.originalEncoding
self.declaredHTMLEncoding = dammit.declaredHTMLEncoding
if markup:
if self.markupMassage:
if not hasattr(self.markupMassage, "__iter__"):
self.markupMassage = self.MARKUP_MASSAGE
for fix, m in self.markupMassage:
markup = fix.sub(m, markup)
# TODO: We get rid of markupMassage so that the
# soup object can be deepcopied later on. Some
# Python installations can't copy regexes. If anyone
# was relying on the existence of markupMassage, this
# might cause problems.
del(self.markupMassage)
self.reset()
SGMLParser.feed(self, markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def __getattr__(self, methodName):
"""This method routes method call requests to either the SGMLParser
superclass or the Tag superclass, depending on the method name."""
#print "__getattr__ called on %s.%s" % (self.__class__, methodName)
if methodName.startswith('start_') or methodName.startswith('end_') \
or methodName.startswith('do_'):
return SGMLParser.__getattr__(self, methodName)
elif not methodName.startswith('__'):
return Tag.__getattr__(self, methodName)
else:
raise AttributeError
def isSelfClosingTag(self, name):
"""Returns true iff the given string is the name of a
self-closing tag according to this parser."""
return self.SELF_CLOSING_TAGS.has_key(name) \
or self.instanceSelfClosingTags.has_key(name)
def reset(self):
Tag.__init__(self, self, self.ROOT_TAG_NAME)
self.hidden = 1
SGMLParser.reset(self)
self.currentData = []
self.currentTag = None
self.tagStack = []
self.quoteStack = []
self.pushTag(self)
def popTag(self):
tag = self.tagStack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
def endData(self, containerClass=NavigableString):
if self.currentData:
currentData = u''.join(self.currentData)
if (currentData.translate(self.STRIP_ASCII_SPACES) == '' and
not set([tag.name for tag in self.tagStack]).intersection(
self.PRESERVE_WHITESPACE_TAGS)):
if '\n' in currentData:
currentData = '\n'
else:
currentData = ' '
self.currentData = []
if self.parseOnlyThese and len(self.tagStack) <= 1 and \
(not self.parseOnlyThese.text or \
not self.parseOnlyThese.search(currentData)):
return
o = containerClass(currentData)
o.setup(self.currentTag, self.previous)
if self.previous:
self.previous.next = o
self.previous = o
self.currentTag.contents.append(o)
def _popToTag(self, name, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
return
numPops = 0
mostRecentTag = None
for i in range(len(self.tagStack)-1, 0, -1):
if name == self.tagStack[i].name:
numPops = len(self.tagStack)-i
break
if not inclusivePop:
numPops = numPops - 1
for i in range(0, numPops):
mostRecentTag = self.popTag()
return mostRecentTag
def _smartPop(self, name):
"""We need to pop up to the previous tag of this type, unless
one of this tag's nesting reset triggers comes between this
tag and the previous tag of this type, OR unless this tag is a
generic nesting trigger and another generic nesting trigger
comes between this tag and the previous tag of this type.
Examples:
<p>Foo<b>Bar *<p>* should pop to 'p', not 'b'.
<p>Foo<table>Bar *<p>* should pop to 'table', not 'p'.
<p>Foo<table><tr>Bar *<p>* should pop to 'tr', not 'p'.
<li><ul><li> *<li>* should pop to 'ul', not the first 'li'.
<tr><table><tr> *<tr>* should pop to 'table', not the first 'tr'
<td><tr><td> *<td>* should pop to 'tr', not the first 'td'
"""
nestingResetTriggers = self.NESTABLE_TAGS.get(name)
isNestable = nestingResetTriggers is not None
isResetNesting = self.RESET_NESTING_TAGS.has_key(name)
popTo = None
inclusive = True
for i in range(len(self.tagStack)-1, 0, -1):
p = self.tagStack[i]
if (not p or p.name == name) and not isNestable:
#Non-nestable tags get popped to the top or to their
#last occurrence.
popTo = name
break
if (nestingResetTriggers is not None
and p.name in nestingResetTriggers) \
or (nestingResetTriggers is None and isResetNesting
and self.RESET_NESTING_TAGS.has_key(p.name)):
#If we encounter one of the nesting reset triggers
#peculiar to this tag, or we encounter another tag
#that causes nesting to reset, pop up to but not
#including that tag.
popTo = p.name
inclusive = False
break
p = p.parent
if popTo:
self._popToTag(popTo, inclusive)
def unknown_starttag(self, name, attrs, selfClosing=0):
#print "Start tag %s: %s" % (name, attrs)
if self.quoteStack:
#This is not a real tag.
#print "<%s> is not real!" % name
attrs = ''.join([' %s="%s"' % (x, y) for x, y in attrs])
self.handle_data('<%s%s>' % (name, attrs))
return
self.endData()
if not self.isSelfClosingTag(name) and not selfClosing:
self._smartPop(name)
if self.parseOnlyThese and len(self.tagStack) <= 1 \
and (self.parseOnlyThese.text or not self.parseOnlyThese.searchTag(name, attrs)):
return
tag = Tag(self, name, attrs, self.currentTag, self.previous)
if self.previous:
self.previous.next = tag
self.previous = tag
self.pushTag(tag)
if selfClosing or self.isSelfClosingTag(name):
self.popTag()
if name in self.QUOTE_TAGS:
#print "Beginning quote (%s)" % name
self.quoteStack.append(name)
self.literal = 1
return tag
def unknown_endtag(self, name):
#print "End tag %s" % name
if self.quoteStack and self.quoteStack[-1] != name:
#This is not a real end tag.
#print "</%s> is not real!" % name
self.handle_data('</%s>' % name)
return
self.endData()
self._popToTag(name)
if self.quoteStack and self.quoteStack[-1] == name:
self.quoteStack.pop()
self.literal = (len(self.quoteStack) > 0)
def handle_data(self, data):
self.currentData.append(data)
def _toStringSubclass(self, text, subclass):
"""Adds a certain piece of text to the tree as a NavigableString
subclass."""
self.endData()
self.handle_data(text)
self.endData(subclass)
def handle_pi(self, text):
"""Handle a processing instruction as a ProcessingInstruction
object, possibly one with a %SOUP-ENCODING% slot into which an
encoding will be plugged later."""
if text[:3] == "xml":
text = u"xml version='1.0' encoding='%SOUP-ENCODING%'"
self._toStringSubclass(text, ProcessingInstruction)
def handle_comment(self, text):
"Handle comments as Comment objects."
self._toStringSubclass(text, Comment)
def handle_charref(self, ref):
"Handle character references as data."
if self.convertEntities:
data = unichr(int(ref))
else:
data = '&#%s;' % ref
self.handle_data(data)
def handle_entityref(self, ref):
"""Handle entity references as data, possibly converting known
HTML and/or XML entity references to the corresponding Unicode
characters."""
data = None
if self.convertHTMLEntities:
try:
data = unichr(name2codepoint[ref])
except KeyError:
pass
if not data and self.convertXMLEntities:
data = self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref)
if not data and self.convertHTMLEntities and \
not self.XML_ENTITIES_TO_SPECIAL_CHARS.get(ref):
# TODO: We've got a problem here. We're told this is
# an entity reference, but it's not an XML entity
# reference or an HTML entity reference. Nonetheless,
# the logical thing to do is to pass it through as an
# unrecognized entity reference.
#
# Except: when the input is "&carol;" this function
# will be called with input "carol". When the input is
# "AT&T", this function will be called with input
# "T". We have no way of knowing whether a semicolon
# was present originally, so we don't know whether
# this is an unknown entity or just a misplaced
# ampersand.
#
# The more common case is a misplaced ampersand, so I
# escape the ampersand and omit the trailing semicolon.
data = "&%s" % ref
if not data:
# This case is different from the one above, because we
# haven't already gone through a supposedly comprehensive
# mapping of entities to Unicode characters. We might not
# have gone through any mapping at all. So the chances are
# very high that this is a real entity, and not a
# misplaced ampersand.
data = "&%s;" % ref
self.handle_data(data)
def handle_decl(self, data):
"Handle DOCTYPEs and the like as Declaration objects."
self._toStringSubclass(data, Declaration)
def parse_declaration(self, i):
"""Treat a bogus SGML declaration as raw data. Treat a CDATA
declaration as a CData object."""
j = None
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
k = len(self.rawdata)
data = self.rawdata[i+9:k]
j = k+3
self._toStringSubclass(data, CData)
else:
try:
j = SGMLParser.parse_declaration(self, i)
except SGMLParseError:
toHandle = self.rawdata[i:]
self.handle_data(toHandle)
j = i + len(toHandle)
return j
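# Illustrative sketch, not part of the original module: BeautifulStoneSoup
# applies no HTML-specific nesting rules, which suits XML-ish markup. Note
# that _smartPop still closes a tag when the same tag name reappears,
# because this class defines no NESTABLE_TAGS.
def _demo_stone_soup():
    soup = BeautifulStoneSoup('<person><first>Ada</first>'
                              '<last>Lovelace</last></person>')
    person = soup.find('person')
    assert person.find('first').string == u'Ada'
    assert person.firstTag.string == u'Ada'  # __getattr__ maps fooTag to find('foo')
    return soup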
class BeautifulSoup(BeautifulStoneSoup):
"""This parser knows the following facts about HTML:
* Some tags have no closing tag and should be interpreted as being
closed as soon as they are encountered.
* The text inside some tags (ie. 'script') may contain tags which
are not really part of the document and which should be parsed
as text, not tags. If you want to parse the text as tags, you can
always fetch it and parse it explicitly.
* Tag nesting rules:
Most tags can't be nested at all. For instance, the occurrence of
a <p> tag should implicitly close the previous <p> tag.
<p>Para1<p>Para2
should be transformed into:
<p>Para1</p><p>Para2
Some tags can be nested arbitrarily. For instance, the occurrence
of a <blockquote> tag should _not_ implicitly close the previous
<blockquote> tag.
Alice said: <blockquote>Bob said: <blockquote>Blah
should NOT be transformed into:
Alice said: <blockquote>Bob said: </blockquote><blockquote>Blah
Some tags can be nested, but the nesting is reset by the
interposition of other tags. For instance, a <tr> tag should
implicitly close the previous <tr> tag within the same <table>,
but not close a <tr> tag in another table.
<table><tr>Blah<tr>Blah
should be transformed into:
<table><tr>Blah</tr><tr>Blah
but,
<tr>Blah<table><tr>Blah
should NOT be transformed into
<tr>Blah<table></tr><tr>Blah
Differing assumptions about tag nesting rules are a major source
of problems with the BeautifulSoup class. If BeautifulSoup is not
treating as nestable a tag your page author treats as nestable,
try ICantBelieveItsBeautifulSoup, MinimalSoup, or
BeautifulStoneSoup before writing your own subclass."""
def __init__(self, *args, **kwargs):
if not kwargs.has_key('smartQuotesTo'):
kwargs['smartQuotesTo'] = self.HTML_ENTITIES
kwargs['isHTML'] = True
BeautifulStoneSoup.__init__(self, *args, **kwargs)
SELF_CLOSING_TAGS = buildTagMap(None,
('br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base', 'col'))
PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])
QUOTE_TAGS = {'script' : None, 'textarea' : None}
#According to the HTML standard, each of these inline tags can
#contain another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_INLINE_TAGS = ('span', 'font', 'q', 'object', 'bdo', 'sub', 'sup',
'center')
#According to the HTML standard, these block tags can contain
#another tag of the same type. Furthermore, it's common
#to actually use these tags this way.
NESTABLE_BLOCK_TAGS = ('blockquote', 'div', 'fieldset', 'ins', 'del')
#Lists can contain other lists, but there are restrictions.
NESTABLE_LIST_TAGS = { 'ol' : [],
'ul' : [],
'li' : ['ul', 'ol'],
'dl' : [],
'dd' : ['dl'],
'dt' : ['dl'] }
#Tables can contain other tables, but there are restrictions.
NESTABLE_TABLE_TAGS = {'table' : [],
'tr' : ['table', 'tbody', 'tfoot', 'thead'],
'td' : ['tr'],
'th' : ['tr'],
'thead' : ['table'],
'tbody' : ['table'],
'tfoot' : ['table'],
}
NON_NESTABLE_BLOCK_TAGS = ('address', 'form', 'p', 'pre')
#If one of these tags is encountered, all tags up to the next tag of
#this type are popped.
RESET_NESTING_TAGS = buildTagMap(None, NESTABLE_BLOCK_TAGS, 'noscript',
NON_NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS,
NESTABLE_TABLE_TAGS)
NESTABLE_TAGS = buildTagMap([], NESTABLE_INLINE_TAGS, NESTABLE_BLOCK_TAGS,
NESTABLE_LIST_TAGS, NESTABLE_TABLE_TAGS)
# Used to detect the charset in a META tag; see start_meta
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def start_meta(self, attrs):
"""Beautiful Soup can detect a charset included in a META tag,
try to convert the document to that charset, and re-parse the
document from the beginning."""
httpEquiv = None
contentType = None
contentTypeIndex = None
tagNeedsEncodingSubstitution = False
for i in range(0, len(attrs)):
key, value = attrs[i]
key = key.lower()
if key == 'http-equiv':
httpEquiv = value
elif key == 'content':
contentType = value
contentTypeIndex = i
if httpEquiv and contentType: # It's an interesting meta tag.
match = self.CHARSET_RE.search(contentType)
if match:
if (self.declaredHTMLEncoding is not None or
self.originalEncoding == self.fromEncoding):
# An HTML encoding was sniffed while converting
# the document to Unicode, or an HTML encoding was
# sniffed during a previous pass through the
# document, or an encoding was specified
# explicitly and it worked. Rewrite the meta tag.
def rewrite(match):
return match.group(1) + "%SOUP-ENCODING%"
newAttr = self.CHARSET_RE.sub(rewrite, contentType)
attrs[contentTypeIndex] = (attrs[contentTypeIndex][0],
newAttr)
tagNeedsEncodingSubstitution = True
else:
# This is our first pass through the document.
# Go through it again with the encoding information.
newCharset = match.group(3)
if newCharset and newCharset != self.originalEncoding:
self.declaredHTMLEncoding = newCharset
self._feed(self.declaredHTMLEncoding)
raise StopParsing
pass
tag = self.unknown_starttag("meta", attrs)
if tag and tagNeedsEncodingSubstitution:
tag.containsSubstitutions = True
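# Illustrative sketch, not part of the original module: the HTML heuristics
# close an open <p> when the next <p> starts, and convertEntities turns
# HTML entity references into Unicode characters.
def _demo_beautiful_soup():
    soup = BeautifulSoup('<p>caf&eacute;<p>bar',
                         convertEntities=BeautifulSoup.HTML_ENTITIES)
    paragraphs = soup.findAll('p')
    assert len(paragraphs) == 2  # the first <p> was closed implicitly
    assert paragraphs[0].string == u'caf\xe9'
    return soup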
class StopParsing(Exception):
pass
class ICantBelieveItsBeautifulSoup(BeautifulSoup):
"""The BeautifulSoup class is oriented towards skipping over
common HTML errors like unclosed tags. However, sometimes it makes
errors of its own. For instance, consider this fragment:
<b>Foo<b>Bar</b></b>
This is perfectly valid (if bizarre) HTML. However, the
BeautifulSoup class will implicitly close the first b tag when it
encounters the second 'b'. It will think the author wrote
"<b>Foo<b>Bar", and didn't close the first 'b' tag, because
there's no real-world reason to bold something that's already
bold. When it encounters '</b></b>' it will close two more 'b'
tags, for a grand total of three tags closed instead of two. This
can throw off the rest of your document structure. The same is
true of a number of other tags, listed below.
It's much more common for someone to forget to close a 'b' tag
than to actually use nested 'b' tags, and the BeautifulSoup class
handles the common case. This class handles the not-so-common
case: where you can't believe someone wrote what they did, but
it's valid HTML and BeautifulSoup screwed up by assuming it
wouldn't be."""
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS = \
('em', 'big', 'i', 'small', 'tt', 'abbr', 'acronym', 'strong',
'cite', 'code', 'dfn', 'kbd', 'samp', 'strong', 'var', 'b',
'big')
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS = ('noscript',)
NESTABLE_TAGS = buildTagMap([], BeautifulSoup.NESTABLE_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_BLOCK_TAGS,
I_CANT_BELIEVE_THEYRE_NESTABLE_INLINE_TAGS)
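# Illustrative sketch, not part of the original module: the difference this
# class makes for validly nested inline tags like <b>.
def _demo_i_cant_believe():
    nested = '<b>Foo<b>Bar</b></b>'
    # BeautifulSoup closes the first <b> early, so the two end up siblings:
    assert BeautifulSoup(nested).find('b').find('b') is None
    # This class treats <b> as nestable, so the second <b> stays inside:
    soup = ICantBelieveItsBeautifulSoup(nested)
    assert soup.find('b').find('b') is not None
    return soup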
class MinimalSoup(BeautifulSoup):
"""The MinimalSoup class is for parsing HTML that contains
pathologically bad markup. It makes no assumptions about tag
nesting, but it does know which tags are self-closing, that
<script> tags contain Javascript and should not be parsed, that
META tags may contain encoding information, and so on.
This also makes it better for subclassing than BeautifulStoneSoup
or BeautifulSoup."""
RESET_NESTING_TAGS = buildTagMap('noscript')
NESTABLE_TAGS = {}
class BeautifulSOAP(BeautifulStoneSoup):
"""This class will push a tag with only a single string child into
the tag's parent as an attribute. The attribute's name is the tag
name, and the value is the string child. An example should give
the flavor of the change:
<foo><bar>baz</bar></foo>
=>
<foo bar="baz"><bar>baz</bar></foo>
You can then access fooTag['bar'] instead of fooTag.barTag.string.
This is, of course, useful for scraping structures that tend to
use subelements instead of attributes, such as SOAP messages. Note
that it modifies its input, so don't print the modified version
out.
I'm not sure how many people really want to use this class; let me
know if you do. Mainly I like the name."""
def popTag(self):
if len(self.tagStack) > 1:
tag = self.tagStack[-1]
parent = self.tagStack[-2]
parent._getAttrMap()
if (isinstance(tag, Tag) and len(tag.contents) == 1 and
isinstance(tag.contents[0], NavigableString) and
not parent.attrMap.has_key(tag.name)):
parent[tag.name] = tag.contents[0]
BeautifulStoneSoup.popTag(self)
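# Illustrative sketch (not part of the original module): the attribute
# promotion described in the docstring above.
def _demo_beautiful_soap():
    soup = BeautifulSOAP('<foo><bar>baz</bar></foo>')
    print soup.foo['bar']   # 'baz', copied up from the single string child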
#Enterprise class names! It has come to our attention that some people
#think the names of the Beautiful Soup parser classes are too silly
#and "unprofessional" for use in enterprise screen-scraping. We feel
#your pain! For such-minded folk, the Beautiful Soup Consortium And
#All-Night Kosher Bakery recommends renaming this file to
#"RobustParser.py" (or, in cases of extreme enterprisiness,
#"RobustParserBeanInterface.class") and using the following
#enterprise-friendly class aliases:
class RobustXMLParser(BeautifulStoneSoup):
pass
class RobustHTMLParser(BeautifulSoup):
pass
class RobustWackAssHTMLParser(ICantBelieveItsBeautifulSoup):
pass
class RobustInsanelyWackAssHTMLParser(MinimalSoup):
pass
class SimplifyingSOAPParser(BeautifulSOAP):
pass
######################################################
#
# Bonus library: Unicode, Dammit
#
# This class forces XML data into a standard format (usually to UTF-8
# or Unicode). It is heavily based on code from Mark Pilgrim's
# Universal Feed Parser. It does not rewrite the XML or HTML to
# reflect a new encoding: that happens in BeautifulStoneSoup.handle_pi
# (XML) and BeautifulSoup.start_meta (HTML).
# Autodetects character encodings.
# Download from http://chardet.feedparser.org/
try:
import chardet
# import chardet.constants
# chardet.constants._debug = 1
except ImportError:
chardet = None
# cjkcodecs and iconv_codec make Python know about more character encodings.
# Both are available from http://cjkpython.i18n.org/
# They're built in if you use Python 2.4.
try:
import cjkcodecs.aliases
except ImportError:
pass
try:
import iconv_codec
except ImportError:
pass
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = { "macintosh" : "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
smartQuotesTo='xml', isHTML=False):
self.declaredHTMLEncoding = None
self.markup, documentEncoding, sniffedEncoding = \
self._detectEncoding(markup, isHTML)
self.smartQuotesTo = smartQuotesTo
self.triedEncodings = []
if markup == '' or isinstance(markup, unicode):
self.originalEncoding = None
self.unicode = unicode(markup)
return
u = None
for proposedEncoding in overrideEncodings:
u = self._convertFrom(proposedEncoding)
if u: break
if not u:
for proposedEncoding in (documentEncoding, sniffedEncoding):
u = self._convertFrom(proposedEncoding)
if u: break
        # If no luck and we have an auto-detection library, try that:
if not u and chardet and not isinstance(self.markup, unicode):
u = self._convertFrom(chardet.detect(self.markup)['encoding'])
# As a last resort, try utf-8 and windows-1252:
if not u:
for proposed_encoding in ("utf-8", "windows-1252"):
u = self._convertFrom(proposed_encoding)
if u: break
self.unicode = u
if not u: self.originalEncoding = None
def _subMSChar(self, orig):
"""Changes a MS smart quote character to an XML or HTML
entity."""
sub = self.MS_CHARS.get(orig)
if isinstance(sub, tuple):
if self.smartQuotesTo == 'xml':
sub = '&#x%s;' % sub[1]
else:
sub = '&%s;' % sub[0]
return sub
def _convertFrom(self, proposed):
proposed = self.find_codec(proposed)
if not proposed or proposed in self.triedEncodings:
return None
self.triedEncodings.append(proposed)
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
        if self.smartQuotesTo and proposed.lower() in ("windows-1252",
                                                       "iso-8859-1",
                                                       "iso-8859-2"):
            markup = re.compile("([\x80-\x9f])").sub(
                lambda x: self._subMSChar(x.group(1)), markup)
try:
# print "Trying to convert document to %s" % proposed
u = self._toUnicode(markup, proposed)
self.markup = u
self.originalEncoding = proposed
except Exception, e:
# print "That didn't work!"
# print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _toUnicode(self, data, encoding):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
# strip Byte Order Mark (if present)
if (len(data) >= 4) and (data[:2] == '\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == '\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == '\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == '\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
newdata = unicode(data, encoding)
return newdata
def _detectEncoding(self, xml_data, isHTML=False):
"""Given a document, tries to detect its XML encoding."""
xml_encoding = sniffed_xml_encoding = None
try:
if xml_data[:4] == '\x4c\x6f\xa7\x94':
# EBCDIC
xml_data = self._ebcdic_to_ascii(xml_data)
elif xml_data[:4] == '\x00\x3c\x00\x3f':
# UTF-16BE
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data, 'utf-16be').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xfe\xff') \
and (xml_data[2:4] != '\x00\x00'):
# UTF-16BE with BOM
sniffed_xml_encoding = 'utf-16be'
xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x3f\x00':
# UTF-16LE
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data, 'utf-16le').encode('utf-8')
elif (len(xml_data) >= 4) and (xml_data[:2] == '\xff\xfe') and \
(xml_data[2:4] != '\x00\x00'):
# UTF-16LE with BOM
sniffed_xml_encoding = 'utf-16le'
xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\x00\x3c':
# UTF-32BE
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data, 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\x3c\x00\x00\x00':
# UTF-32LE
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data, 'utf-32le').encode('utf-8')
elif xml_data[:4] == '\x00\x00\xfe\xff':
# UTF-32BE with BOM
sniffed_xml_encoding = 'utf-32be'
xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8')
elif xml_data[:4] == '\xff\xfe\x00\x00':
# UTF-32LE with BOM
sniffed_xml_encoding = 'utf-32le'
xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8')
elif xml_data[:3] == '\xef\xbb\xbf':
# UTF-8 with BOM
sniffed_xml_encoding = 'utf-8'
xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8')
else:
sniffed_xml_encoding = 'ascii'
pass
except:
xml_encoding_match = None
xml_encoding_match = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
if not xml_encoding_match and isHTML:
regexp = re.compile('<\s*meta[^>]+charset=([^>]*?)[;\'">]', re.I)
xml_encoding_match = regexp.search(xml_data)
if xml_encoding_match is not None:
xml_encoding = xml_encoding_match.groups()[0].lower()
if isHTML:
self.declaredHTMLEncoding = xml_encoding
if sniffed_xml_encoding and \
(xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode',
'iso-10646-ucs-4', 'ucs-4', 'csucs4',
'utf-16', 'utf-32', 'utf_16', 'utf_32',
'utf16', 'u16')):
xml_encoding = sniffed_xml_encoding
return xml_data, xml_encoding, sniffed_xml_encoding
def find_codec(self, charset):
return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \
or (charset and self._codec(charset.replace("-", ""))) \
or (charset and self._codec(charset.replace("-", "_"))) \
or charset
def _codec(self, charset):
if not charset: return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
EBCDIC_TO_ASCII_MAP = None
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
201,202,106,107,108,109,110,111,112,113,114,203,204,205,
206,207,208,209,126,115,116,117,118,119,120,121,122,210,
211,212,213,214,215,216,217,218,219,220,221,222,223,224,
225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
250,251,252,253,254,255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
return s.translate(c.EBCDIC_TO_ASCII_MAP)
MS_CHARS = { '\x80' : ('euro', '20AC'),
'\x81' : ' ',
'\x82' : ('sbquo', '201A'),
'\x83' : ('fnof', '192'),
'\x84' : ('bdquo', '201E'),
'\x85' : ('hellip', '2026'),
'\x86' : ('dagger', '2020'),
'\x87' : ('Dagger', '2021'),
'\x88' : ('circ', '2C6'),
'\x89' : ('permil', '2030'),
'\x8A' : ('Scaron', '160'),
'\x8B' : ('lsaquo', '2039'),
'\x8C' : ('OElig', '152'),
'\x8D' : '?',
'\x8E' : ('#x17D', '17D'),
'\x8F' : '?',
'\x90' : '?',
'\x91' : ('lsquo', '2018'),
'\x92' : ('rsquo', '2019'),
'\x93' : ('ldquo', '201C'),
'\x94' : ('rdquo', '201D'),
'\x95' : ('bull', '2022'),
'\x96' : ('ndash', '2013'),
'\x97' : ('mdash', '2014'),
'\x98' : ('tilde', '2DC'),
'\x99' : ('trade', '2122'),
'\x9a' : ('scaron', '161'),
'\x9b' : ('rsaquo', '203A'),
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
'\x9f' : ('Yuml', ''),}
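# Illustrative sketch (not part of the original module): hand UnicodeDammit raw
# bytes and read back the converted text and the encoding that worked.
def _demo_unicode_dammit():
    dammit = UnicodeDammit('Sacr\xe9 bleu!')
    print dammit.unicode            # u'Sacr\xe9 bleu!'
    print dammit.originalEncoding   # 'windows-1252' (or chardet's guess)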
#######################################################################
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
| Python |
# -*- coding: utf-8 -*-
import wsgiref.handlers
import sys
import os
import string
import codecs
import random
import re
import cgi
import datetime
import time
import math
import pickle
import logging
import Cookie
import urllib
import urllib2
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
LIB_PATH = os.path.join(DIR_PATH, "lib")
EXTRA_PATHS = []
for path in os.listdir(LIB_PATH):
fullpath = os.path.join(LIB_PATH, path)
if os.path.isdir(fullpath) and not path.startswith("."):
EXTRA_PATHS.append(fullpath)
sys.path = sys.path + EXTRA_PATHS
from google.appengine.ext.webapp import template
from google.appengine.ext import webapp
webapp.template.register_template_library('smart_if')
template.register_template_library('django.contrib.markup.templatetags.markup')
from math import sqrt
from google.appengine.api import mail
from google.appengine.ext import search
from google.appengine.ext import db
from google.appengine.api import urlfetch
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext.webapp.util import run_wsgi_app
from datetime import datetime
from google.appengine.api import images
from models import *
from utilities import *
from django.utils import simplejson
from translator import *
from paging import *
# Set to true if we want to have our webapp print stack traces, etc
_DEBUG = True
class BaseRequestHandler(webapp.RequestHandler):
"""Supplies a common template generation function.
When you call generate(), we augment the template variables supplied with
the variables present in this function.
"""
def generate(self, template_name, template_values={}):
logincontrol = LoginControl()
access = logincontrol.CheckAccess('admin')
if access == 1:
logged = True
else:
logged = False
queryitems = Categoria.all()
queryitems.filter(' menu = ', 'main')
queryitems.order('singular')
mcategorias = queryitems.fetch(500)
queryitems = Categoria.all()
queryitems.filter(' menu = ', 'secondary')
queryitems.order('singular')
scategorias = queryitems.fetch(500)
values = {
'request': self.request,
'mcategorias': mcategorias,
'scategorias': scategorias,
'application_name': 'qstack',
'logged': logged
}
values.update(template_values)
directory = os.path.dirname(__file__)
path = os.path.join(directory, os.path.join('templates', template_name))
self.response.out.write(template.render(path, values, debug=_DEBUG))
class Index(BaseRequestHandler):
def get(self):
queryitems = Cosa.all()
queryitems.order('-cosaId')
items = queryitems.fetch(300)
cosas = []
for item in items:
if item.title != '':
cosas.append(item)
template_values = {
'cosas': cosas[:20],
}
self.generate('index.html', template_values)
class GoogleVerification(BaseRequestHandler):
def get(self, page = 1):
template_values = {
}
self.generate('googleverification.html', template_values)
class Tipos(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('tipos.html', template_values)
class Patrones(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('patrones.html', template_values)
class PatronesTag(BaseRequestHandler):
def get(self, tag):
template_values = {
}
self.generate('patrones-tag.html', template_values)
class Comprar(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('comprar.html', template_values)
class TiendasBroches(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('tiendas-broches.html', template_values)
class Videos(BaseRequestHandler):
def get(self):
queryitems = Video.all()
queryitems.order('-pubDate')
videos = queryitems.fetch(500)
template_values = {
'videos': videos
}
self.generate('videos.html', template_values)
class VideoPage(BaseRequestHandler):
def get(self, slug):
queryitems = Video.all()
queryitems.filter(' slug = ', slug)
items = queryitems.fetch(1)
video = items[0]
template_values = {
'video': video,
}
self.generate('video.html', template_values)
class Revistas(BaseRequestHandler):
def get(self):
queryitems = Revista.all()
queryitems.order('-pubDate')
revistas = queryitems.fetch(500)
template_values = {
'revistas': revistas
}
self.generate('revistas.html', template_values)
class RevistaPage(BaseRequestHandler):
def get(self, slug):
queryitems = Revista.all()
queryitems.filter(' slug = ', slug)
revistas = queryitems.fetch(1)
revista = revistas[0]
template_values = {
'revista': revista,
}
self.generate('revista.html', template_values)
class Libros(BaseRequestHandler):
def get(self):
queryitems = Libro.all()
queryitems.order('-pubDate')
libros = queryitems.fetch(500)
template_values = {
'libros': libros
}
self.generate('libros.html', template_values)
class LibroPage(BaseRequestHandler):
def get(self, slug):
queryitems = Libro.all()
queryitems.filter(' slug = ', slug)
libros = queryitems.fetch(1)
libro = libros[0]
template_values = {
'libro': libro,
}
self.generate('libro.html', template_values)
class LibrosTipo(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('libros-tipo.html', template_values)
class Manualidades(BaseRequestHandler):
def get(self):
queryitems = Tag.all()
queryitems.order('singular')
tags = queryitems.fetch(500)
template_values = {
'tags': tags,
}
self.generate('manualidades.html', template_values)
class Blogs(BaseRequestHandler):
def get(self):
queryblogs = Blog.all()
queryblogs.order('-pubDate')
blogs = queryblogs.fetch(500)
template_values = {
'blogs': blogs
}
self.generate('blogs.html', template_values)
class BlogPage(BaseRequestHandler):
def get(self, slug):
queryblogs = Blog.all()
queryblogs.filter(' slug = ', slug)
blogs = queryblogs.fetch(1)
blog = blogs[0]
queryblogs = Blog.all()
queryblogs.order('-pubDate')
blogs = queryblogs.fetch(7)
template_values = {
'blog': blog,
'blogs': blogs,
}
self.generate('blog.html', template_values)
class Privacidad(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('politica-privacidad.html', template_values)
class Historia(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('historia.html', template_values)
class QueEs(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('que-es.html', template_values)
class Usos(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('usos.html', template_values)
class Herramientas(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('herramientas.html', template_values)
class Pegamento(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('pegamento.html', template_values)
class Tijeras(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('tijeras.html', template_values)
class QuienesSomos(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('quienes-somos.html', template_values)
class Enlaces(BaseRequestHandler):
def get(self):
template_values = {
}
self.generate('enlaces.html', template_values)
class Contacto(BaseRequestHandler):
def get(self):
template_values = {}
self.generate('contacto.html', template_values)
def post(self):
if self.request.get("send") != "" and self.request.get("email") != "":
name = self.request.get("name")
email = self.request.get("email")
message = self.request.get("message")
            # Send an email to the site owner
            mailtosend = mail.EmailMessage(
                sender="anabelen@gmail.com",
                subject="Mensaje fieltro.org de " + name + " - " + email)
mailtosend.to = "anabelen@gmail.com"
mailtosend.reply_to = email
mailtosend.body = message
try:
mailtosend.send()
except:
                pass  # best-effort send; ignore mail delivery errors
template_values = {
'sent': True,
}
self.generate('contacto.html', template_values)
else:
template_values = {}
self.generate('contacto.html', template_values)
class Tutoriales(BaseRequestHandler):
def get(self):
queryitems = Tutorial.all()
queryitems.order('-pubDate')
tutoriales = queryitems.fetch(500)
template_values = {
'tutoriales': tutoriales
}
self.generate('tutoriales.html', template_values)
class TutorialPage(BaseRequestHandler):
def get(self, slug):
queryitems = Tutorial.all()
queryitems.filter(' slug = ', slug)
items = queryitems.fetch(1)
tutorial = items[0]
template_values = {
'tutorial': tutorial
}
self.generate('tutorial.html', template_values)
class SiteMap(BaseRequestHandler):
def get(self):
queryitems = Blog.all()
blogs = queryitems.fetch(500)
queryitems = Revista.all()
revistas = queryitems.fetch(500)
queryitems = Video.all()
videos = queryitems.fetch(500)
queryitems = Libro.all()
libros = queryitems.fetch(500)
queryitems = Tutorial.all()
tutoriales = queryitems.fetch(500)
template_values = {
'blogs': blogs,
'revistas': revistas,
'videos': videos,
'libros': libros,
'tutoriales': tutoriales,
}
print "Content-Type: text/xml"
self.generate('sitemap.xml', template_values)
class Cosas(BaseRequestHandler):
def get(self):
queryitems = Cosa.all()
queryitems.order('-cosaId')
cosas = queryitems.fetch(500)
template_values = {
'cosas': cosas,
}
self.generate('cosas.html', template_values)
class Fotos(BaseRequestHandler):
def get(self):
queryitems = Cosa.all()
queryitems.order('-cosaId')
offs = 500
cosas = queryitems.fetch(500, offset=offs)
template_values = {
'cosas': cosas,
}
self.generate('fotos.html', template_values)
class CosaPage(BaseRequestHandler):
def get(self, slug):
queryposts = Cosa.all()
queryposts.filter(' slug = ', slug)
cosas = queryposts.fetch(1)
if len(cosas) > 0:
cosa = cosas[0]
else:
cosa = Cosa.get(slug)
tag = cosa.tag
categoria = cosa.categoria
queryposts = Cosa.all()
queryposts.filter(' tag = ', tag)
tagCosas = queryposts.fetch(12)
queryitems = Cosa.all()
queryitems.filter(' categoria = ', categoria)
categoriaCosas = queryitems.fetch(12)
template_values = {
'cosa': cosa,
'tagCosas': tagCosas,
'categoriaCosas': categoriaCosas,
}
self.generate('cosa.html', template_values)
class Favelet(BaseRequestHandler):
def get(self):
queryitems = Categoria.all()
queryitems.order('plural')
categorias = queryitems.fetch(500)
queryitems = Tag.all()
queryitems.order('plural')
tags = queryitems.fetch(500)
template_values = {
'categorias': categorias,
'tags': tags,
}
self.generate('dfavelet.js', template_values)
class CategoriaTag(BaseRequestHandler):
def get(self, slug):
queryitems = Categoria.all()
queryitems.filter(' pluralSlug = ', slug)
items = queryitems.fetch(1)
if len(items) > 0:
categoria = items[0]
queryitems = Cosa.all()
queryitems.filter(' categoriaSlug = ', slug)
queryitems.order('-cosaId')
cosas = queryitems.fetch(500)
template_values = {
'categoria': categoria,
'cosas': cosas,
}
self.generate('categoria.html', template_values)
else:
queryitems = Tag.all()
queryitems.filter(' pluralSlug = ', slug)
items = queryitems.fetch(1)
tag = items[0]
queryitems = Cosa.all()
queryitems.filter(' tagSlug = ', slug)
queryitems.order('-cosaId')
cosas = queryitems.fetch(500)
template_values = {
'tag': tag,
'cosas': cosas,
}
self.generate('tag.html', template_values)
class PatronesCategoriaTag(BaseRequestHandler):
def get(self, slug):
queryitems = Categoria.all()
queryitems.filter(' pluralSlug = ', slug)
items = queryitems.fetch(1)
if len(items) > 0:
categoriatag = items[0]
queryitems = Cosa.all()
queryitems.filter(' categoriaSlug = ', slug)
queryitems.order('-cosaId')
cosas = queryitems.fetch(500)
template_values = {
'categoriatag': categoriatag,
'cosas': cosas,
}
self.generate('patrones-tag.html', template_values)
else:
queryitems = Tag.all()
queryitems.filter(' pluralSlug = ', slug)
items = queryitems.fetch(1)
categoriatag = items[0]
queryitems = Cosa.all()
queryitems.filter(' tagSlug = ', slug)
queryitems.order('-cosaId')
cosas = queryitems.fetch(500)
template_values = {
'categoriatag': categoriatag,
'cosas': cosas,
}
self.generate('patrones-tag.html', template_values)
class CosaImg(BaseRequestHandler):
def get(self, slug):
queryposts = Cosa.all()
queryposts.filter(' slug = ', slug)
cosas = queryposts.fetch(1)
if len(cosas) > 0:
cosa = cosas[0]
self.response.headers['Content-Type'] = "image/jpeg"
self.response.headers['Cache-Control'] = "public, max-age=363600"
self.response.out.write(cosa.bytes_content)
else:
#self.error(500)
#errorlauncher = ErrorLauncher()
#errorlauncher.Launch(self,404)
#return 1
cosa = Cosa.get(slug)
self.response.headers['Content-Type'] = "image/jpeg"
self.response.headers['Cache-Control'] = "public, max-age=363600"
self.response.out.write(cosa.bytes_content)
class CosaImgResized(webapp.RequestHandler):
def get(self, slug, width, height, action):
queryposts = Cosa.all()
queryposts.filter(' slug = ', slug)
cosas = queryposts.fetch(1)
if len(cosas) > 0:
myfile = cosas[0]
if action=='resized':
if height=='auto':
content = myfile.bytes_content
img=images.Image(content)
originalwidth = float(img.width)
originalheight = float(img.height)
resizedwidth = float(width)
resizedheight = float(float(originalheight*resizedwidth)/originalwidth)
resizedcontent = images.resize(content, int(resizedwidth), int(resizedheight))
elif width=='auto':
content = myfile.bytes_content
img=images.Image(content)
originalwidth = float(img.width)
originalheight = float(img.height)
resizedheight = float(height)
resizedwidth = float(float(originalwidth*resizedheight)/originalheight)
resizedcontent = images.resize(content, int(resizedwidth), int(resizedheight))
else:
content = myfile.bytes_content
img=images.Image(content)
resizedwidth=float(width)
resizedheight=float(height)
resizedcontent = images.resize(content, int(resizedwidth), int(resizedheight))
image = images.Image(resizedcontent)
image.crop(0.0, 0.0, 1.0, 1.0)
image_data = image.execute_transforms(output_encoding=images.JPEG, quality=100)
self.response.headers['Content-Type'] = "image/jpeg"
self.response.headers['Cache-Control'] = "public, max-age=363600"
self.response.out.write(image_data)
elif action=='cropped':
width=float(width)
height=float(height)
content = myfile.bytes_content
img=images.Image(content)
originalwidth = float(img.width)
originalheight = float(img.height)
resizedwidth = width
if originalwidth>=originalheight:
resizedheight = height
resizedwidth = (originalwidth*resizedheight)/originalheight
if resizedwidth<=width:
resizedwidth = width
resizedheight = (originalheight*resizedwidth)/originalwidth
resizedcontent = images.resize(content, int(resizedwidth), int(resizedheight))
resizedimg=images.Image(resizedcontent)
originalwidth = resizedimg.width
originalheight = resizedimg.height
                # Compute what fraction of the resized image the target box
                # covers, then center a crop window of that size.
                relativewidth = (width/originalwidth)
                relativeheight = (height/originalheight)
                borderwidth = math.fabs((1 - relativewidth)/2)
                borderheight = math.fabs((1 - relativeheight)/2)
                left_x = borderwidth
                right_x = 1 - borderwidth
                top_y = borderheight
                bottom_y = 1 - borderheight
#croppedcontent=images.crop(resizedcontent, left_x, top_y, right_x, bottom_y)
image = images.Image(resizedcontent)
image.crop(left_x, top_y, right_x, bottom_y)
image_data = image.execute_transforms(output_encoding=images.JPEG, quality=100)
self.response.headers['Content-Type'] = "image/jpeg"
self.response.headers['Cache-Control'] = "public, max-age=363600"
self.response.out.write(image_data)
else:
                image = images.Image(myfile.bytes_content)
image.crop(0.0, 0.0, 1.0, 1.0)
image_data = image.execute_transforms(output_encoding=images.JPEG, quality=100)
self.response.headers['Content-Type'] = "image/jpeg"
self.response.headers['Cache-Control'] = "public, max-age=363600"
self.response.out.write(image_data)
else:
self.error(500)
errorlauncher = ErrorLauncher()
errorlauncher.Launch(self,500)
return 1
class SaveFavelet(BaseRequestHandler):
def get(self):
url = urllib.unquote(self.request.get("u"))
title = urllib.unquote(self.request.get("t"))
image = urllib.unquote(self.request.get("i"))
slug = urllib.unquote(self.request.get("s"))
category = urllib.unquote(self.request.get("c"))
categorySlug = urllib.unquote(self.request.get("cs"))
tag = urllib.unquote(self.request.get("ta"))
tagSlug = urllib.unquote(self.request.get("tas"))
description = urllib.unquote(self.request.get("d"))
result = urlfetch.fetch(image)
content = result.content
img = images.Image(content)
unacosa = Cosa()
unacosa.bytes_content = db.Blob(content)
unacosa.width = img.width
unacosa.height = img.height
unacosa.title = title
unacosa.url = url
unacosa.urlimg = image
unacosa.slug = slug
unacosa.tag = tag
unacosa.tagSlug = tagSlug
unacosa.categoria = category
unacosa.categoriaSlug = categorySlug
unacosa.description = description
queryitem = Cosa.all()
queryitem.order('-cosaId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].cosaId
cosaId = lastId + 1
unacosa.cosaId = cosaId
unacosa.put()
self.response.out.write("Saved!");
def main():
urlmap = [
('/', Index),
('/page/(.*)/', Index),
('/google03e298e060fd8943.html', GoogleVerification),
('/blogs', Blogs),
('/blog/(.*)', BlogPage),
('/libros', Libros),
('/libros/(.*)', LibrosTipo),
('/libro/(.*)', LibroPage),
('/revistas', Revistas),
('/revista/(.*)', RevistaPage),
('/manualidades', Manualidades),
('/comprar', Comprar),
('/videos', Videos),
('/video/(.*)', VideoPage),
('/patrones', Patrones),
('/patrones/(.*)', PatronesCategoriaTag),
('/cosa', CosaPage),
('/fotos', Fotos),
('/politica-privacidad', Privacidad),
('/historia', Historia),
('/tipos', Tipos),
('/usos', Usos),
('/que-es', QueEs),
('/quienes-somos', QuienesSomos),
('/contacto', Contacto),
('/enlaces', Enlaces),
('/tutoriales', Tutoriales),
('/tutorial/(.*)', TutorialPage),
('/save/from_favelet', SaveFavelet),
('/herramientas', Herramientas),
('/pegamento', Pegamento),
('/tijeras', Tijeras),
('/cosas', Cosas),
('/foto/(.*)', CosaImg),
('/cosa/(.*)/img', CosaImg),
('/cosa/(.*)/img/(.*)/(.*)/(resized)/', CosaImgResized),
('/cosa/(.*)/img/(.*)/(.*)/(cropped)/', CosaImgResized),
('/cosa/(.*)', CosaPage),
('/sitemap.xml', SiteMap),
('/favelet', Favelet),
('/(.*)', CategoriaTag),
]
application = webapp.WSGIApplication(urlmap, debug=_DEBUG)
run_wsgi_app(application)
if __name__ == "__main__":
main()
| Python |
import urllib
import urllib2
from django.utils import simplejson
baseUrl = "http://ajax.googleapis.com/ajax/services/language/translate"
def getSplits(text, splitLength = 4500):
    '''
    The Translate API limits the length of text (4500 characters) that can be
    translated at once, so yield the source text in chunks of at most
    splitLength characters.
    '''
    return (text[index:index+splitLength] for index in xrange(0,len(text),splitLength))
return (text[index:index+splitLength] for index in xrange(0,len(text),splitLength))
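# Illustrative sketch (not part of the original file): getSplits yields the
# text lazily in API-sized chunks.
def _demo_get_splits():
    print list(getSplits('abcdef', splitLength=2))   # ['ab', 'cd', 'ef']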
def translate(text, src='', to='en'):
    '''
    A Python wrapper for the Google AJAX Language API:
    * Uses Google Language Detection when no source language is provided
      with the source text
    * Splits up text longer than 4500 characters, a limit imposed by the API
    '''
params = ({'langpair': '%s|%s' % (src, to),
'v': '1.0'
})
retText=''
for text in getSplits(text):
params['q'] = text
resp = simplejson.load(urllib.urlopen('%s' % (baseUrl), data = urllib.urlencode(params)))
try:
retText += resp['responseData']['translatedText']
except:
raise
    return retText
| Python |
# -*- coding: utf-8 -*-
import os
import re
from models import *
from google.appengine.api import users
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.datastore import entity_pb
def serialize_entities(models):
if models is None:
return None
elif isinstance(models, db.Model):
# Just one instance
return db.model_to_protobuf(models).Encode()
else:
# A list
return [db.model_to_protobuf(x).Encode() for x in models]
def deserialize_entities(data):
if data is None:
return None
elif isinstance(data, str):
# Just one instance
return db.model_from_protobuf(entity_pb.EntityProto(data))
else:
return [db.model_from_protobuf(entity_pb.EntityProto(x)) for x in data]
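# Illustrative sketch (not part of the original file): the intended round-trip
# of a model instance (or list of instances) through memcache.
def _demo_serialize_roundtrip(entity):
    memcache.set('cached_entity', serialize_entities(entity))
    return deserialize_entities(memcache.get('cached_entity'))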
class LoginControl():
def CheckLogin(self,role,externalself):
notlogged = True
permisiondeny = True
user = users.get_current_user()
        if user is None:
notlogged = True
else:
queryuser = CustomUser.all()
queryuser.filter('user = ',user)
if queryuser.count()>0:
notlogged = False
dbusers = queryuser.fetch(1)
dbuser = dbusers[0]
if dbuser.role==role or dbuser.role=='admin':
permisiondeny = False
return dbuser.role
else:
permisiondeny = True
else:
notlogged = True
        if os.environ.has_key('REQUEST_URI'):
            url = str(os.environ['REQUEST_URI'])
else:
url = "http://www.fieltro.org/admin/"
if notlogged:
externalself.redirect(users.create_login_url(url))
return 0
if permisiondeny:
externalself.redirect("http://www.fieltro.org")
return 0
def CheckAccess(self,role):
notlogged = False
permisiondeny = False
user = users.get_current_user()
        if user is None:
notlogged = True
else:
queryuser = CustomUser.all()
queryuser.filter('user = ',user)
if queryuser.count()>0:
dbusers = queryuser.fetch(1)
dbuser = dbusers[0]
if dbuser.role==role or dbuser.role=='admin':
return 1
else:
permisiondeny = True
else:
notlogged = True
if notlogged:
return 0
if permisiondeny:
return 0
class encodingHell():
def removeEsChars(self, string):
string = string.replace(u'á', u'a')
string = string.replace(u'é', u'e')
string = string.replace(u'í', u'i')
string = string.replace(u'ó', u'o')
string = string.replace(u'ú', u'u')
string = string.replace(u'ñ', u'n')
string = string.replace(u'ç', u'c')
string = string.replace(u'"', u"'")
string = string.replace(u'\n', u'')
return string
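# Illustrative sketch (not part of the original file): removeEsChars strips
# Spanish accented characters so the text can be slugified safely.
def _demo_remove_es_chars():
    print encodingHell().removeEsChars(u'piñata')   # u'pinata'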
def slugify(string):
string = re.sub('\s+', '_', string)
string = re.sub('[^\w.-]', '', string)
    return string.strip('_.- ').lower()
| Python |
from google.appengine.ext import db
from google.appengine.ext import search
##################################################################################
# #
# Models #
# #
##################################################################################
class Blog(db.Model):
blogId = db.IntegerProperty()
name = db.StringProperty()
slug = db.StringProperty()
url = db.StringProperty()
description = db.TextProperty()
firstMonth = db.StringProperty()
firstYear = db.StringProperty()
firstPost = db.DateTimeProperty()
country = db.StringProperty()
author = db.StringProperty()
email = db.StringProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
twitter = db.StringProperty()
percentage = db.IntegerProperty()
class Libro(db.Model):
libroId = db.IntegerProperty()
name = db.StringProperty()
slug = db.StringProperty()
description = db.TextProperty()
country = db.StringProperty()
author = db.StringProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
language = db.StringProperty()
class Revista(db.Model):
revistaId = db.IntegerProperty()
name = db.StringProperty()
slug = db.StringProperty()
description = db.TextProperty()
country = db.StringProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
language = db.StringProperty()
class Tutorial(db.Model):
tutorialId = db.IntegerProperty()
title = db.StringProperty()
slug = db.StringProperty()
text = db.TextProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
author = db.StringProperty()
class Video(db.Model):
videoId = db.IntegerProperty()
title = db.StringProperty()
slug = db.StringProperty()
description = db.TextProperty()
videoDate = db.DateTimeProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
author = db.StringProperty()
url = db.StringProperty()
code = db.StringProperty()
class Tienda(db.Model):
tiendaId = db.IntegerProperty()
name = db.StringProperty()
slug = db.StringProperty()
description = db.TextProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
owner = db.StringProperty()
url = db.StringProperty()
email = db.StringProperty()
address = db.StringProperty()
location = db.StringProperty()
country = db.StringProperty()
class Cosa(db.Model):
cosaId = db.IntegerProperty()
title = db.StringProperty()
slug = db.StringProperty()
url = db.StringProperty()
urlimg = db.StringProperty()
bytes_content = db.BlobProperty()
width = db.IntegerProperty()
height = db.IntegerProperty()
description = db.TextProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
tags = db.StringListProperty()
featured = db.BooleanProperty(default=False)
tag = db.StringProperty()
tagSlug = db.StringProperty()
categoria = db.StringProperty()
categoriaSlug = db.StringProperty()
class Comentario(db.Model):
comentarioId = db.IntegerProperty()
objeto = db.StringProperty()
objetoId = db.StringProperty()
url = db.StringProperty()
email = db.StringProperty()
name = db.StringProperty()
text = db.TextProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
class Categoria(db.Model):
categoriaId = db.IntegerProperty()
singular = db.StringProperty()
singularSlug = db.StringProperty()
plural = db.StringProperty()
pluralSlug = db.StringProperty()
menu = db.StringProperty()
class Tag(db.Model):
tagId = db.IntegerProperty()
singular = db.StringProperty()
singularSlug = db.StringProperty()
plural = db.StringProperty()
pluralSlug = db.StringProperty()
father = db.StringProperty()
fatherSlug = db.StringProperty()
# NOTE: this second Comentario definition supersedes the one above; the two
# declare different properties for the same kind.
class Comentario(db.Model):
comentarioId = db.IntegerProperty()
objectType = db.IntegerProperty()
objectId = db.IntegerProperty()
slug = db.StringProperty()
author = db.StringProperty()
email = db.StringProperty()
url = db.StringProperty()
text = db.TextProperty()
pubDate = db.DateTimeProperty(auto_now_add=True)
ip = db.StringProperty()
class CustomUser(db.Model):
name = db.StringProperty()
user = db.UserProperty()
    role = db.StringProperty()
| Python |
"""
A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
"""
import unittest
from django import template
from google.appengine.ext import webapp
register = webapp.template.create_template_register()
#==============================================================================
# Calculation objects
#==============================================================================
class BaseCalc(object):
def __init__(self, var1, var2=None, negate=False):
self.var1 = var1
self.var2 = var2
self.negate = negate
def resolve(self, context):
try:
var1, var2 = self.resolve_vars(context)
outcome = self.calculate(var1, var2)
except:
outcome = False
if self.negate:
return not outcome
return outcome
def resolve_vars(self, context):
var2 = self.var2 and self.var2.resolve(context)
return self.var1.resolve(context), var2
def calculate(self, var1, var2):
raise NotImplementedError()
class Or(BaseCalc):
def calculate(self, var1, var2):
return var1 or var2
class And(BaseCalc):
def calculate(self, var1, var2):
return var1 and var2
class Equals(BaseCalc):
def calculate(self, var1, var2):
return var1 == var2
class Greater(BaseCalc):
def calculate(self, var1, var2):
return var1 > var2
class GreaterOrEqual(BaseCalc):
def calculate(self, var1, var2):
return var1 >= var2
class In(BaseCalc):
def calculate(self, var1, var2):
return var1 in var2
#==============================================================================
# Tests
#==============================================================================
class TestVar(object):
"""
A basic self-resolvable object similar to a Django template variable. Used
to assist with tests.
"""
def __init__(self, value):
self.value = value
def resolve(self, context):
return self.value
class SmartIfTests(unittest.TestCase):
def setUp(self):
self.true = TestVar(True)
self.false = TestVar(False)
self.high = TestVar(9000)
self.low = TestVar(1)
def assertCalc(self, calc, context=None):
"""
Test a calculation is True, also checking the inverse "negate" case.
"""
context = context or {}
self.assert_(calc.resolve(context))
calc.negate = not calc.negate
self.assertFalse(calc.resolve(context))
def assertCalcFalse(self, calc, context=None):
"""
Test a calculation is False, also checking the inverse "negate" case.
"""
context = context or {}
self.assertFalse(calc.resolve(context))
calc.negate = not calc.negate
self.assert_(calc.resolve(context))
def test_or(self):
self.assertCalc(Or(self.true))
self.assertCalcFalse(Or(self.false))
self.assertCalc(Or(self.true, self.true))
self.assertCalc(Or(self.true, self.false))
self.assertCalc(Or(self.false, self.true))
self.assertCalcFalse(Or(self.false, self.false))
def test_and(self):
self.assertCalc(And(self.true, self.true))
self.assertCalcFalse(And(self.true, self.false))
self.assertCalcFalse(And(self.false, self.true))
self.assertCalcFalse(And(self.false, self.false))
def test_equals(self):
self.assertCalc(Equals(self.low, self.low))
self.assertCalcFalse(Equals(self.low, self.high))
def test_greater(self):
self.assertCalc(Greater(self.high, self.low))
self.assertCalcFalse(Greater(self.low, self.low))
self.assertCalcFalse(Greater(self.low, self.high))
def test_greater_or_equal(self):
self.assertCalc(GreaterOrEqual(self.high, self.low))
self.assertCalc(GreaterOrEqual(self.low, self.low))
self.assertCalcFalse(GreaterOrEqual(self.low, self.high))
def test_in(self):
list_ = TestVar([1,2,3])
invalid_list = TestVar(None)
self.assertCalc(In(self.low, list_))
self.assertCalcFalse(In(self.low, invalid_list))
def test_parse_bits(self):
var = IfParser([True]).parse()
self.assert_(var.resolve({}))
var = IfParser([False]).parse()
self.assertFalse(var.resolve({}))
var = IfParser([False, 'or', True]).parse()
self.assert_(var.resolve({}))
var = IfParser([False, 'and', True]).parse()
self.assertFalse(var.resolve({}))
var = IfParser(['not', False, 'and', 'not', False]).parse()
self.assert_(var.resolve({}))
var = IfParser(['not', 'not', True]).parse()
self.assert_(var.resolve({}))
var = IfParser([1, '=', 1]).parse()
self.assert_(var.resolve({}))
var = IfParser([1, 'not', '=', 1]).parse()
self.assertFalse(var.resolve({}))
var = IfParser([1, 'not', 'not', '=', 1]).parse()
self.assert_(var.resolve({}))
var = IfParser([1, '!=', 1]).parse()
self.assertFalse(var.resolve({}))
var = IfParser([3, '>', 2]).parse()
self.assert_(var.resolve({}))
var = IfParser([1, '<', 2]).parse()
self.assert_(var.resolve({}))
var = IfParser([2, 'not', 'in', [2, 3]]).parse()
self.assertFalse(var.resolve({}))
var = IfParser([1, 'or', 1, '=', 2]).parse()
self.assert_(var.resolve({}))
def test_boolean(self):
var = IfParser([True, 'and', True, 'and', True]).parse()
self.assert_(var.resolve({}))
var = IfParser([False, 'or', False, 'or', True]).parse()
self.assert_(var.resolve({}))
var = IfParser([True, 'and', False, 'or', True]).parse()
self.assert_(var.resolve({}))
var = IfParser([False, 'or', True, 'and', True]).parse()
self.assert_(var.resolve({}))
var = IfParser([True, 'and', True, 'and', False]).parse()
self.assertFalse(var.resolve({}))
var = IfParser([False, 'or', False, 'or', False]).parse()
self.assertFalse(var.resolve({}))
var = IfParser([False, 'or', True, 'and', False]).parse()
self.assertFalse(var.resolve({}))
var = IfParser([False, 'and', True, 'or', False]).parse()
self.assertFalse(var.resolve({}))
def test_invalid(self):
self.assertRaises(ValueError, IfParser(['not']).parse)
self.assertRaises(ValueError, IfParser(['==']).parse)
self.assertRaises(ValueError, IfParser([1, 'in']).parse)
self.assertRaises(ValueError, IfParser([1, '>', 'in']).parse)
self.assertRaises(ValueError, IfParser([1, '==', 'not', 'not']).parse)
self.assertRaises(ValueError, IfParser([1, 2]).parse)
# Maps each operator token to (calculation class, truth sense). Operators with
# a False sense are evaluated as the negation of the paired class (e.g. '<='
# is implemented as 'not >').
OPERATORS = {
'=': (Equals, True),
'==': (Equals, True),
'!=': (Equals, False),
'>': (Greater, True),
'>=': (GreaterOrEqual, True),
'<=': (Greater, False),
'<': (GreaterOrEqual, False),
'or': (Or, True),
'and': (And, True),
'in': (In, True),
}
BOOL_OPERATORS = ('or', 'and')
class IfParser(object):
error_class = ValueError
def __init__(self, tokens):
self.tokens = tokens
def _get_tokens(self):
return self._tokens
def _set_tokens(self, tokens):
self._tokens = tokens
self.len = len(tokens)
self.pos = 0
tokens = property(_get_tokens, _set_tokens)
def parse(self):
if self.at_end():
raise self.error_class('No variables provided.')
var1 = self.get_bool_var()
while not self.at_end():
op, negate = self.get_operator()
var2 = self.get_bool_var()
var1 = op(var1, var2, negate=negate)
return var1
def get_token(self, eof_message=None, lookahead=False):
negate = True
token = None
pos = self.pos
while token is None or token == 'not':
if pos >= self.len:
if eof_message is None:
raise self.error_class()
raise self.error_class(eof_message)
token = self.tokens[pos]
negate = not negate
pos += 1
if not lookahead:
self.pos = pos
return token, negate
def at_end(self):
return self.pos >= self.len
def create_var(self, value):
return TestVar(value)
def get_bool_var(self):
"""
Returns either a variable by itself or a non-boolean operation (such as
``x == 0`` or ``x < 0``).
This is needed to keep correct precedence for boolean operations (i.e.
``x or x == 0`` should be ``x or (x == 0)``, not ``(x or x) == 0``).
"""
var = self.get_var()
if not self.at_end():
op_token = self.get_token(lookahead=True)[0]
if isinstance(op_token, basestring) and (op_token not in
BOOL_OPERATORS):
op, negate = self.get_operator()
return op(var, self.get_var(), negate=negate)
return var
def get_var(self):
token, negate = self.get_token('Reached end of statement, still '
'expecting a variable.')
if isinstance(token, basestring) and token in OPERATORS:
raise self.error_class('Expected variable, got operator (%s).' %
token)
var = self.create_var(token)
if negate:
return Or(var, negate=True)
return var
def get_operator(self):
token, negate = self.get_token('Reached end of statement, still '
'expecting an operator.')
if not isinstance(token, basestring) or token not in OPERATORS:
raise self.error_class('%s is not a valid operator.' % token)
if self.at_end():
raise self.error_class('No variable provided after "%s".' % token)
op, true = OPERATORS[token]
if not true:
negate = not negate
return op, negate
#==============================================================================
# Actual templatetag code.
#==============================================================================
class TemplateIfParser(IfParser):
error_class = template.TemplateSyntaxError
def __init__(self, parser, *args, **kwargs):
self.template_parser = parser
return super(TemplateIfParser, self).__init__(*args, **kwargs)
def create_var(self, value):
return self.template_parser.compile_filter(value)
class SmartIfNode(template.Node):
def __init__(self, var, nodelist_true, nodelist_false=None):
self.nodelist_true, self.nodelist_false = nodelist_true, nodelist_false
self.var = var
def render(self, context):
if self.var.resolve(context):
return self.nodelist_true.render(context)
if self.nodelist_false:
return self.nodelist_false.render(context)
return ''
def __repr__(self):
return "<Smart If node>"
def __iter__(self):
for node in self.nodelist_true:
yield node
if self.nodelist_false:
for node in self.nodelist_false:
yield node
def get_nodes_by_type(self, nodetype):
nodes = []
if isinstance(self, nodetype):
nodes.append(self)
nodes.extend(self.nodelist_true.get_nodes_by_type(nodetype))
if self.nodelist_false:
nodes.extend(self.nodelist_false.get_nodes_by_type(nodetype))
return nodes
@register.tag('if')
def smart_if(parser, token):
"""
A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
Arguments and operators _must_ have a space between them, so
``{% if 1>2 %}`` is not a valid smart if tag.
All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
``!=``, ``>``, ``>=``, ``<`` and ``<=``.
"""
bits = token.split_contents()[1:]
var = TemplateIfParser(parser, bits).parse()
nodelist_true = parser.parse(('else', 'endif'))
token = parser.next_token()
if token.contents == 'else':
nodelist_false = parser.parse(('endif',))
parser.delete_first_token()
else:
nodelist_false = None
return SmartIfNode(var, nodelist_true, nodelist_false)
if __name__ == '__main__':
unittest.main()
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
datastore_cache.py
Created by Alkis Evlogimenos on 2009-04-19.
Modified by keakon on 2010-10-31.
"""
import itertools
#import logging
import threading
from google.appengine.api import memcache
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
"""Provides a shim that caches datastore Get calls.
Example code:
import datastore_cache
datastore_cache.DatastoreCachingShim.Install()
# ...
def main(args):
util.run_wsgi_app(application)
"""
_QUERY_CACHE_TIME = 600
class APIProxyShim(object):
"""A generic shim class, with methods to install/uninstall it.
Subclasses of this class can be used to replace the real stub for a service,
intercepting and possibly passing on calls to the original stub.
"""
SERVICE_NAME = None # To be overridden by subclasses
_instance = None
def __init__(self, wrapped_stub):
"""Constructor. Internal use only - see Install()."""
self._wrapped_stub = wrapped_stub
def CreateRPC(self):
return apiproxy_rpc.RPC(stub=self)
def CallWrappedStub(self, call, request, response):
"""Allows subclasses to call the wrapped stub."""
self._wrapped_stub.MakeSyncCall(self.SERVICE_NAME, call, request, response)
def MakeSyncCall(self, service, call, request, response):
    assert service == self.SERVICE_NAME, (
        'Got service name "%s", expected "%s"'
        % (service, self.SERVICE_NAME))
messages = []
assert request.IsInitialized(messages), messages
method = getattr(self, '_Dynamic_' + call, None)
if method:
method(request, response)
else:
self.CallWrappedStub(call, request, response)
assert response.IsInitialized(messages), messages
def __getattr__(self, name):
"""Pass-through to the wrapped stub."""
return getattr(self._wrapped_stub, name)
@classmethod
def Install(cls):
"""Installs the shim. Only needs to be run once at import time.
Note that this accesses internal members of APIProxyStubMap, so may break
in future.
"""
if not cls._instance:
wrapped_stub = apiproxy_stub_map.apiproxy.GetStub(cls.SERVICE_NAME)
assert wrapped_stub, "No service '%s' found to wrap." % cls.SERVICE_NAME
cls._instance = cls(wrapped_stub)
stub_dict = apiproxy_stub_map.apiproxy._APIProxyStubMap__stub_map
stub_dict[cls.SERVICE_NAME] = cls._instance
@classmethod
def Uninstall(cls):
"""Uninstalls the shim.
Note that there's no need to uninstall a shim after each request. You can
install it once at import time and leave it there between requests.
"""
if cls._instance:
stub_dict = apiproxy_stub_map.apiproxy._APIProxyStubMap__stub_map
stub_dict[cls.SERVICE_NAME] = cls._instance._wrapped_stub
cls._instance = None
class DatastoreCachingShim(APIProxyShim):
SERVICE_NAME = 'datastore_v3'
def __init__(self, default_stub):
super(DatastoreCachingShim, self).__init__(default_stub)
        self.local = threading.local()
        # NOTE: attributes set on a threading.local are visible only to the
        # thread that sets them, so this initialization covers the
        # constructing thread only.
        self.local.to_delete = dict()
def _Dynamic_Get(self, request, response):
"""Intercepts get requests and returns them from cache if available."""
#logging.info("Tx: %s, Keys: %s", request.has_transaction(), [str(x) for x in request.key_list()])
if request.has_transaction():
self.CallWrappedStub('Get', request, response)
return
new_request = datastore_pb.GetRequest()
new_response = datastore_pb.GetResponse()
encoded_keys = [k.Encode() for k in request.key_list()]
cached = memcache.get_multi(encoded_keys)
for key, encoded_key in itertools.izip(request.key_list(), encoded_keys):
if encoded_key not in cached:
new_request.add_key().CopyFrom(key)
if new_request.key_size() > 0:
self.CallWrappedStub('Get', new_request, new_response)
entity_iter = iter(new_response.entity_list())
to_put = dict()
for encoded_key in encoded_keys:
entity = cached.get(encoded_key, None)
if entity:
response.add_entity().mutable_entity().CopyFrom(entity)
else:
entity = entity_iter.next()
if entity.entity().IsInitialized():
to_put[encoded_key] = entity.entity()
response.add_entity().CopyFrom(entity)
if to_put:
memcache.set_multi(to_put)
def _Dynamic_Put(self, request, response):
"""Intercepts puts and adds them to the cache."""
self.CallWrappedStub('Put', request, response)
# If this is in a transaction we mark these entries for deletion
# when and if the transaction commits.
if request.has_transaction():
to_delete = [k.Encode() for k in response.key_list()]
self.local.to_delete[request.transaction().handle()].extend(to_delete)
return
to_put = dict()
for e, k in itertools.izip(request.entity_list(), response.key_list()):
e.key().CopyFrom(k)
to_put[k.Encode()] = e
if to_put:
memcache.set_multi(to_put)
def _Dynamic_Delete(self, request, response):
"""Intercepts deletes and deletes entries from the cache."""
self.CallWrappedStub('Delete', request, response)
to_delete = [k.Encode() for k in request.key_list()]
# If this is in a transaction we mark these entries for deletion
# when and if the transaction commits.
if request.has_transaction():
self.local.to_delete[request.transaction().handle()].extend(to_delete)
return
memcache.delete_multi(to_delete)
def _Dynamic_RunQuery(self, query, query_result):
"""Intercepts query results and caches the returned entities."""
if query.has_transaction():
self.CallWrappedStub('RunQuery', query, query_result)
return
key = 'RunQuery:' + query.Encode()
        # For small applications, using hash() is also suitable and more efficient:
        ###############################################################
        # key = 'RunQuery:' + str(hash(query.Encode()))
        ###############################################################
        # If you are worried about hash collisions, you can use md5, sha or any
        # other hash algorithm instead:
###############################################################
# import hashlib
# key = 'RunQuery:' + hashlib.md5(query.Encode()).hexdigest()
###############################################################
results = memcache.get(key)
if results is None:
self.CallWrappedStub('RunQuery', query, query_result)
memcache.set(key, query_result, _QUERY_CACHE_TIME)
else:
query_result.MergeFrom(results)
def _Dynamic_Next(self, request, response):
"""Intercepts the next batch of results and caches the returned entities."""
self.CallWrappedStub('Next', request, response)
to_put = dict([(e.key().Encode(), e) for e in response.result_list()])
memcache.set_multi(to_put)
def _Dynamic_BeginTransaction(self, request, transaction):
"""Intercepts the beginning of transactions and creates thread local storage for deletions"""
self.CallWrappedStub('BeginTransaction', request, transaction)
self.local.to_delete[transaction.handle()] = []
def _Dynamic_Commit(self, transaction, transaction_response):
"""Intercepts the commit of transactions and deletes all entities that were modified/delete by this transaction"""
# We delete from cache before we commit otherwise we have a race condition.
to_delete = self.local.to_delete[transaction.handle()]
if to_delete:
memcache.delete_multi(to_delete)
del self.local.to_delete[transaction.handle()]
self.CallWrappedStub('Commit', transaction, transaction_response)
def _Dynamic_Rollback(self, transaction, transaction_response):
"""Intercepts the rollback of transactions and clears the thread local storage for them"""
del self.local.to_delete[transaction.handle()]
self.CallWrappedStub('Rollback', transaction, transaction_response)
| Python |
from mapreduce import operation as op
def process(entity):
    # Mutate the entity here, then yield a Put operation to re-save it.
    yield op.db.Put(entity)

# NOTE: a second mapper with the same name; if both are kept in one module,
# this definition shadows the one above.
def process(entity):
    yield op.db.Delete(entity)
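# Illustrative sketch (not part of the original file): a mapper like the ones
# above is wired up from mapreduce.yaml by its dotted name. The module name
# 'main' and the entity kind are assumptions, not from this source.
#
#   mapreduce:
#   - name: Touch all Cosa entities
#     mapper:
#       handler: main.process
#       input_reader: mapreduce.input_readers.DatastoreInputReader
#       params:
#       - name: entity_kind
#         default: models.Cosa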
| Python |
from google.appengine.ext import webapp
import re
register = webapp.template.create_template_register()
def add_zeros(value):
value = int(value)
if value<10:
return '0'+str(value)
else:
return str(value)
register.filter(add_zeros)
# Month abbreviations keyed by month number.
_MONTH_ABBREVS = {1: 'Ene', 2: 'Feb', 3: 'Mar', 4: 'Apr', 5: 'May', 6: 'Jun',
                  7: 'Jul', 8: 'Ago', 9: 'Sept', 10: 'Oct', 11: 'Nov',
                  12: 'Dic'}
def datetostring(value):
    strmonth = _MONTH_ABBREVS.get(value.month, 'nop')
    return strmonth + ' ' + str(value.day)
register.filter(datetostring)
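# Illustrative sketch (not part of the original file): the two filters above
# in plain Python.
def _demo_date_filters():
    import datetime
    d = datetime.date(2010, 9, 5)
    print datetostring(d)    # 'Sept 5'
    print add_zeros(d.day)   # '05'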
_base_js_escapes = (
('\\', r'\u005C'),
('\'', r'\u0027'),
('"', r'\u0022'),
('>', r'\u003E'),
('<', r'\u003C'),
('&', r'\u0026'),
('=', r'\u003D'),
('-', r'\u002D'),
(';', r'\u003B'),
(u'\u2028', r'\u2028'),
(u'\u2029', r'\u2029')
)
# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
def escapejs(value):
"""Hex encodes characters for use in JavaScript strings."""
for bad, good in _js_escapes:
value = value.replace(bad, good)
return value
register.filter(escapejs)
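# Illustrative sketch (not part of the original file): escapejs makes a value
# safe to embed inside a JavaScript string literal.
def _demo_escapejs():
    print escapejs(u'</script>')   # prints \u003C/script\u003E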
def slugify(string):
string = re.sub('\s+', '_', string)
string = re.sub('[^\w.-]', '', string)
return string.strip('_.- ').lower()
register.filter(slugify)
| Python |
import urllib
import urllib2
import BeautifulSoup
| Python |
import wsgiref.handlers
import sys
import os
import string
import codecs
import random
import re
import cgi
import datetime
import time
import math
import pickle
import logging
import urlparse
from dateutil import parser
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
LIB_PATH = os.path.join(DIR_PATH, "lib")
EXTRA_PATHS = []
for path in os.listdir(LIB_PATH):
fullpath = os.path.join(LIB_PATH, path)
if os.path.isdir(fullpath) and not path.startswith("."):
EXTRA_PATHS.append(fullpath)
sys.path = sys.path + EXTRA_PATHS
from google.appengine.api import mail
from google.appengine.ext.webapp import template
from google.appengine.ext import webapp
from google.appengine.ext import search
from google.appengine.ext import db
from google.appengine.api import images
from google.appengine.api import urlfetch
from datetime import datetime, timedelta
from django.core import serializers
from django.http import HttpResponsePermanentRedirect
from django.utils import simplejson as json
from google.appengine.api import users
from google.appengine.api import memcache
from google.appengine.ext.db import GqlQuery
from models import *
from utilities import *
webapp.template.register_template_library('customfilters')
webapp.template.register_template_library('smart_if')
# Set to true if we want to have our webapp print stack traces, etc
_DEBUG = True
class BaseRequestHandler(webapp.RequestHandler):
"""Supplies a common template generation function.
When you call generate(), we augment the template variables supplied with
the variables present in this function.
"""
def generate(self, template_name, template_values=None):
# Avoid the mutable-default-argument pitfall; fall back to an empty dict.
template_values = template_values or {}
values = {
'request': self.request,
'application_name': 'Fieltro',
}
values.update(template_values)
directory = os.path.dirname(__file__)
path = os.path.join(directory, os.path.join('admin', template_name))
os.environ['DJANGO_SETTINGS_MODULE'] = 'conf.settings'
self.response.out.write(template.render(path, values, debug=_DEBUG))
class Index(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
template_values = {
}
self.generate('index.html', template_values)
class Blogs(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Blog.all()
queryitems.order('-pubDate')
blogs = queryitems.fetch(500)
template_values = {
'blogs': blogs,
}
self.generate('blogs.html', template_values)
class BlogAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Blog.get(itemkey)
item.delete()
self.redirect("/admin/blogs", False)
return 1
elif action=='edit':
try:
item = Blog.get(itemkey)
except:
queryitem = Blog.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
name = item.name
url = item.url
slug = item.slug
email = item.email
author = item.author
firstPost = item.firstPost
country = item.country
description = item.description
pubDate = item.pubDate
twitter = item.twitter
firstMonth = item.firstMonth
firstYear = item.firstYear
template_values = {
'name': name,
'author': author,
'url': url,
'slug': slug,
'email': email,
'firstPost': firstPost,
'country': country,
'description': description,
'pubDate': pubDate,
'twitter': twitter,
'firstMonth': firstMonth,
'firstYear': firstYear,
'key': itemkey,
}
elif action=='new':
template_values = {
}
self.generate("blog.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Blog.get(itemkey)
creating = False
else:
item = Blog()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Blog.all()
queryitem.order('-blogId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].blogId
blogId = lastId + 1
item.blogId = blogId
item.name = self.request.get("name")
item.url = self.request.get("url")
item.slug = self.request.get("slug")
item.author = self.request.get("author")
item.email = self.request.get("email")
item.firstPost = parser.parse(self.request.get('firstPost'))
item.country = self.request.get("country")
item.description = self.request.get("description")
item.twitter = self.request.get("twitter")
item.firstMonth = self.request.get("firstMonth")
item.firstYear = self.request.get("firstYear")
item.put()
self.redirect("/admin/blogs", True)
class Tiendas(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Tienda.all()
queryitems.order('-pubDate')
tiendas = queryitems.fetch(500)
template_values = {
'tiendas': tiendas,
}
self.generate('tiendas.html', template_values)
class TiendaAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Tienda.get(itemkey)
item.delete()
self.redirect("/admin/tiendas", False)
return 1
elif action=='edit':
try:
item = Tienda.get(itemkey)
except:
queryitem = Tienda.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
template_values = {
'item': item,
'key': itemkey,
}
elif action=='new':
template_values = {
}
self.generate("tienda.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Tienda.get(itemkey)
creating = False
else:
item = Tienda()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Tienda.all()
queryitem.order('-tiendaId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].tiendaId
tiendaId = lastId + 1
item.tiendaId = tiendaId
item.name = self.request.get("name")
item.url = self.request.get("url")
item.slug = self.request.get("slug")
item.author = self.request.get("author")
item.email = self.request.get("email")
item.country = self.request.get("country")
item.description = self.request.get("description")
item.put()
self.redirect("/admin/tiendas", True)
class Libros(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Libro.all()
queryitems.order('-pubDate')
libros = queryitems.fetch(500)
template_values = {
'libros': libros,
}
self.generate('libros.html', template_values)
class LibroAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Libro.get(itemkey)
item.delete()
self.redirect("/admin/libros", False)
return 1
elif action=='edit':
try:
item = Libro.get(itemkey)
except:
queryitem = Libro.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
name = item.name
slug = item.slug
author = item.author
country = item.country
description = item.description
pubDate = item.pubDate
language = item.language
template_values = {
'name': name,
'author': author,
'slug': slug,
'country': country,
'description': description,
'pubDate': pubDate,
'key': itemkey,
'language': language,
}
elif action=='new':
template_values = {
}
self.generate("libro.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Libro.get(itemkey)
creating = False
else:
item = Libro()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Libro.all()
queryitem.order('-libroId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].libroId
libroId = lastId + 1
item.libroId = libroId
item.name = self.request.get("name")
item.slug = self.request.get("slug")
item.author = self.request.get("author")
item.country = self.request.get("country")
item.description = self.request.get("description")
item.language = self.request.get("language")
item.put()
self.redirect("/admin/libros", True)
class Videos(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Video.all()
queryitems.order('-pubDate')
videos = queryitems.fetch(500)
template_values = {
'videos': videos,
}
self.generate('videos.html', template_values)
class VideoAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Video.get(itemkey)
item.delete()
self.redirect("/admin/videos", False)
return 1
elif action=='edit':
try:
item = Video.get(itemkey)
except:
queryitem = Video.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
title = item.title
slug = item.slug
url = item.url
description = item.description
code = item.code
pubDate = item.pubDate
template_values = {
'title': title,
'slug': slug,
'url': url,
'description': description,
'code': code,
'pubDate': pubDate,
'key': itemkey,
}
elif action=='new':
template_values = {
}
self.generate("video.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Video.get(itemkey)
creating = False
else:
item = Video()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Video.all()
queryitem.order('-videoId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].videoId
videoId = lastId + 1
item.videoId = videoId
item.title = self.request.get("title")
item.slug = self.request.get("slug")
item.url = self.request.get("url")
item.description = self.request.get("description")
item.code = self.request.get("code")
item.put()
self.redirect("/admin/videos", True)
class Revistas(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Revista.all()
queryitems.order('-pubDate')
revistas = queryitems.fetch(500)
template_values = {
'revistas': revistas,
}
self.generate('revistas.html', template_values)
class RevistaAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Revista.get(itemkey)
item.delete()
self.redirect("/admin/revistas", False)
return 1
elif action=='edit':
try:
item = Revista.get(itemkey)
except:
queryitem = Revista.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
name = item.name
slug = item.slug
country = item.country
description = item.description
pubDate = item.pubDate
language = item.language
template_values = {
'name': name,
'slug': slug,
'country': country,
'description': description,
'pubDate': pubDate,
'key': itemkey,
'language': language,
}
elif action=='new':
template_values = {
}
self.generate("revista.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Revista.get(itemkey)
creating = False
else:
item = Revista()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Revista.all()
queryitem.order('-revistaId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].revistaId
revistaId = lastId + 1
item.revistaId = revistaId
item.name = self.request.get("name")
item.slug = self.request.get("slug")
item.country = self.request.get("country")
item.description = self.request.get("description")
item.language = self.request.get("language")
item.put()
self.redirect("/admin/revistas", True)
class Categorias(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Categoria.all()
queryitems.order('singular')
categorias = queryitems.fetch(500)
template_values = {
'categorias': categorias,
}
self.generate('categorias.html', template_values)
class CategoriaAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Categoria.get(itemkey)
item.delete()
self.redirect("/admin/categorias", False)
return 1
elif action=='edit':
try:
item = Categoria.get(itemkey)
except:
queryitem = Categoria.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
singular = item.singular
singularSlug = item.singularSlug
plural = item.plural
pluralSlug = item.pluralSlug
menu = item.menu
template_values = {
'singular': singular,
'singularSlug': singularSlug,
'plural': plural,
'pluralSlug': pluralSlug,
'menu': menu,
'key': itemkey,
}
elif action=='new':
template_values = {
}
self.generate("categoria.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Categoria.get(itemkey)
creating = False
else:
item = Categoria()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Categoria.all()
queryitem.order('-categoriaId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].categoriaId
categoriaId = lastId + 1
item.categoriaId = categoriaId
item.singular = self.request.get("singular")
item.singularSlug = self.request.get("singularSlug")
item.plural = self.request.get("plural")
item.pluralSlug = self.request.get("pluralSlug")
item.menu = self.request.get("menu")
item.put()
self.redirect("/admin/categorias", True)
class Tags(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Tag.all()
queryitems.order('singular')
tags = queryitems.fetch(500)
template_values = {
'tags': tags,
}
self.generate('tags.html', template_values)
class TagAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Tag.get(itemkey)
item.delete()
self.redirect("/admin/tags", False)
return 1
elif action=='edit':
try:
item = Tag.get(itemkey)
except:
queryitem = Tag.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
singular = item.singular
singularSlug = item.singularSlug
plural = item.plural
pluralSlug = item.pluralSlug
father = item.father
fatherSlug = item.fatherSlug
queryitems = Tag.all()
queryitems.order('plural')
tags = queryitems.fetch(500)
template_values = {
'singular': singular,
'singularSlug': singularSlug,
'plural': plural,
'pluralSlug': pluralSlug,
'father': father,
'fatherSlug': fatherSlug,
'key': itemkey,
'tags': tags,
}
elif action=='new':
queryitems = Tag.all()
queryitems.order('plural')
tags = queryitems.fetch(500)
template_values = {
'tags': tags,
}
self.generate("tag.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Tag.get(itemkey)
creating = False
else:
item = Tag()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Tag.all()
queryitem.order('-tagId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].tagId
tagId = lastId + 1
item.tagId = tagId
item.singular = self.request.get("singular")
item.singularSlug = self.request.get("singularSlug")
item.plural = self.request.get("plural")
item.pluralSlug = self.request.get("pluralSlug")
item.father = self.request.get("father")
item.fatherSlug = self.request.get("fatherSlug")
item.put()
self.redirect("/admin/tags", True)
class Cosas(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Cosa.all()
queryitems.order('-pubDate')
cosas = queryitems.fetch(500)
template_values = {
'cosas': cosas,
}
self.generate('cosas.html', template_values)
class CosaAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Cosa.get(itemkey)
item.delete()
self.redirect("/admin/cosas", False)
return 1
elif action=='edit':
try:
item = Cosa.get(itemkey)
except:
queryitem = Cosa.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
title = item.title
slug = item.slug
url = item.url
urlimg = item.urlimg
description = item.description
tag = item.tag
tagSlug = item.tagSlug
categoria = item.categoria
categoriaSlug = item.categoriaSlug
queryitems = Categoria.all()
queryitems.order('plural')
categorias = queryitems.fetch(500)
queryitems = Tag.all()
queryitems.order('plural')
tags = queryitems.fetch(500)
template_values = {
'categorias': categorias,
'tags': tags,
'title': title,
'slug': slug,
'url': url,
'urlimg': urlimg,
'description': description,
'tag': tag,
'tagSlug': tagSlug,
'categoria': categoria,
'categoriaSlug': categoriaSlug,
'key': itemkey,
}
elif action=='new':
queryitems = Categoria.all()
queryitems.order('plural')
categorias = queryitems.fetch(500)
queryitems = Tag.all()
queryitems.order('plural')
tags = queryitems.fetch(500)
template_values = {
'categorias': categorias,
'tags': tags,
}
self.generate('cosa.html', template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Cosa.get(itemkey)
creating = False
else:
item = Cosa()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
# Query Cosa (not Tutorial): cosaId is read back from the result below.
queryitem = Cosa.all()
queryitem.order('-cosaId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].cosaId
cosaId = lastId + 1
item.cosaId = cosaId
item.title = self.request.get("title")
item.slug = self.request.get("slug")
item.url = self.request.get("url")
item.urlimg = self.request.get("urlimg")
item.tag = self.request.get("tag")
item.categoria = self.request.get("categoria")
item.tagSlug = self.request.get("tagSlug")
item.categoriaSlug = self.request.get("categoriaSlug")
item.description = self.request.get("description")
item.put()
self.redirect("/admin/cosas", True)
class Tutoriales(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Tutorial.all()
queryitems.order('-pubDate')
tutoriales = queryitems.fetch(500)
template_values = {
'tutoriales': tutoriales,
}
self.generate('tutoriales.html', template_values)
class TutorialAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
item = Tutorial.get(itemkey)
item.delete()
self.redirect("/admin/tutoriales", False)
return 1
elif action=='edit':
try:
item = Tutorial.get(itemkey)
except:
queryitem = Tutorial.all()
queryitem.filter(' key = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
title = item.title
slug = item.slug
text = item.text
pubDate = item.pubDate
author = item.author
template_values = {
'title': title,
'slug': slug,
'text': text,
'pubDate': pubDate,
'key': itemkey,
'author': author,
}
elif action=='new':
template_values = {
}
self.generate("tutorial.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
item = Tutorial.get(itemkey)
creating = False
else:
item = Tutorial()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest orderNum used, increase it by 1 and use it
queryitem = Tutorial.all()
queryitem.order('-tutorialId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].tutorialId
tutorialId = lastId + 1
item.tutorialId = tutorialId
item.title = self.request.get("title")
item.slug = self.request.get("slug")
item.text = self.request.get("text")
item.author = self.request.get("author")
item.put()
self.redirect("/admin/tutoriales", True)
class Wallpapers(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
template_values = {
}
self.generate('wallpapers.html', template_values)
class WallpaperAction(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
template_values = {
}
self.generate('wallpaper.html', template_values)
class Sounds(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
template_values = {
}
self.generate('sounds.html', template_values)
class SoundAction(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
template_values = {
}
self.generate('sound.html', template_values)
class Galleries(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
queryitems = Gallery.all()
queryitems.order('-pubDate')
items = queryitems.fetch(500)
querytags = GalleryTag.all()
querytags.order('name')
tags = querytags.fetch(100)
template_values = {
'galleries': items,
'tags': tags
}
self.generate('galleries.html', template_values)
class GalleryAction(BaseRequestHandler):
def get(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
category = self.request.get("c")
if action == 'delete':
item = Gallery.get(itemkey)
item.delete()
self.redirect("/admin/galleries/", False)
return 1
elif action=='edit':
try:
item = Gallery.get(itemkey)
except:
queryitem = Gallery.all()
queryitem.filter(' galleryId = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
title = item.title
pubDate = item.pubDate
key = str(item.key())
galleryId = item.galleryId
galleryTags = item.tags
category = item.category
querytags = GalleryTag.all()
querytags.order('name')
tags = querytags.fetch(100)
queryphotos = File.all()
queryphotos.filter(' galleryId = ', int(galleryId))
photos = queryphotos.fetch(50)
template_values = {
'title': title,
'pubDate': pubDate,
'galleryId': galleryId,
'galleryTags': galleryTags,
'tags': tags,
'category': category,
'photos': photos,
}
elif action=='new':
querytags = GalleryTag.all()
querytags.order('name')
tags = querytags.fetch(100)
template_values = {
'tags': tags,
'category': category,
}
self.generate("gallery.html", template_values)
def post(self, itemkey, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if itemkey != '':
queryitem = Gallery.all()
queryitem.filter(' galleryId = ', int(itemkey))
items = queryitem.fetch(1)
item = items[0]
creating = False
else:
item = Gallery()
creating = True
if self.request.get("save")!='':
if creating:
# Get the latest galleryId used, increase it by 1 and use it
queryitem = Gallery.all()
queryitem.order('-galleryId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].galleryId
galleryId = lastId + 1
item.galleryId = galleryId
item.title = self.request.get("title")
item.category = self.request.get("category")
item.tags = self.request.POST.getall("tag")
item.put()
self.redirect("/admin/galleries/", True)
class PublishGallery(webapp.RequestHandler):
def post(self, id):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
querygallery = Gallery.all()
querygallery.filter(' galleryId = ', int(id))
galleries = querygallery.fetch(1)
gallery = galleries[0]
gallery.draft = False
gallery.put()
self.response.out.write("Happy end")
class UnpublishGallery(webapp.RequestHandler):
def post(self, id):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
querygallery = Gallery.all()
querygallery.filter(' galleryId = ', int(id))
galleries = querygallery.fetch(1)
gallery = galleries[0]
gallery.draft = True
gallery.put()
self.response.out.write("Happy end")
class GalleryTags(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
querytags = GalleryTag.all()
querytags.order('name')
tags = querytags.fetch(500)
template_values = {
'tags': tags,
}
self.generate('gallerytags.html', template_values)
class GalleryTagAction(BaseRequestHandler):
def get(self, key, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if action == 'delete':
tag = GalleryTag.get(key)
tag.delete()
template_values = {}
self.redirect("/admin/gallerytags/", True)
return 1
elif action=='edit':
tag = GalleryTag.get(key)
name = tag.name
slug = tag.slug
template_values = {
'name': name,
'slug': slug,
}
self.generate('gallerytags.html', template_values)
def post(self, key, action):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
if key != '':
tag = GalleryTag.get(key)
else:
tag = GalleryTag()
if self.request.get("save")!='':
name = self.request.get("name")
slug = self.request.get("slug")
tag.name = name
tag.slug = slug
tag.put()
memcache.flush_all()
self.redirect("/admin/gallerytags/", True)
class AvPhotos(webapp.RequestHandler):
def get(self, category):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if category == "man":
queryitem = File.all()
queryitem.filter(' category = ', 'man')
queryitem.filter(' available = ', True)
queryitem.order('-pubDate')
photos = queryitem.fetch(900)
if category == "woman":
queryitem = File.all()
queryitem.filter(' category = ', 'woman')
queryitem.filter(' available = ', True)
photos = queryitem.fetch(900)
if category == "funny":
queryitem = File.all()
queryitem.filter(' category = ', 'funny')
queryitem.filter(' available = ', True)
photos = queryitem.fetch(800)
template_values = {
'photos' : photos,
}
path = os.path.join(os.path.dirname(__file__), 'admin/gallery-avphotos.html')
self.response.out.write(template.render(path, template_values))
class AssignPhoto(webapp.RequestHandler):
def post(self, fileId, galleryId):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
queryitem = File.all()
queryitem.filter(' fileId = ', int(fileId))
photos = queryitem.fetch(1)
photo = photos[0]
photo.available = False
photo.galleryId = int(galleryId)
photo.put()
querygallery = Gallery.all()
querygallery.filter(' galleryId = ', int(galleryId))
galleries = querygallery.fetch(1)
gallery = galleries[0]
gallery.photosCounter += 1
gallery.put()
self.response.out.write("Todo ok. Photo assigned")
class MakeMainPhoto(webapp.RequestHandler):
def post(self, fileId, galleryId):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
querygallery = Gallery.all()
querygallery.filter(' galleryId = ', int(galleryId))
galleries = querygallery.fetch(1)
gallery = galleries[0]
gallery.thumbnail = fileId
gallery.put()
self.response.out.write("Todo ok. Photo is main")
class UnassignPhoto(webapp.RequestHandler):
def post(self, fileId, galleryId):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
queryitem = File.all()
queryitem.filter(' fileId = ', int(fileId))
photos = queryitem.fetch(1)
photo = photos[0]
photo.available = True
photo.galleryId = 0
photo.put()
self.response.out.write("Todo ok. Photo unassigned")
class CropImage(webapp.RequestHandler):
def get(self, category):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
photobig=self.request.get("photobig")
template_values = {
'photobig' : photobig,
'category' : category
}
path = os.path.join(os.path.dirname(__file__), 'admin/gallery-crop.html')
self.response.out.write(template.render(path, template_values))
class SaveImage(webapp.RequestHandler):
def post(self, item):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
photobig = self.request.get("photobig")
posy = int(self.request.get("posy"))
posx = int(self.request.get("posx"))
#try:
result = urlfetch.fetch(photobig)
#except:
#self.response.out.write("Image too big!")
#return 0
content = result.content
img = images.Image(content)
originalwidth = float(img.width)
originalheight = float(img.height)
ratio = float(originalwidth/originalheight)
resizedheight = float(480)
resizedwidth = float(320)
calculatedwidth = math.ceil(float(float(originalwidth * resizedheight) / originalheight))
calculatedheight = math.ceil(float(float(originalheight * resizedwidth) / originalwidth))
if ratio > 0.66666667:
resizedcontent = images.resize(content, int(calculatedwidth), int(resizedheight))
left_x = float(posx / calculatedwidth)
right_x = float((posx + 320) / calculatedwidth)
top_y = float(posy / resizedheight)
bottom_y = float((posy + 480) / resizedheight)
#self.response.out.write("ratio > 4/6 - " + str(left_x) + ", " + str(top_y) + ", " + str(right_x) + ", " + str(bottom_y))
#return 0
else:
resizedcontent = images.resize(content, int(resizedwidth), int(calculatedheight))
left_x = float(posx / resizedwidth)
right_x = float((posx + 320) / resizedwidth)
top_y = float(posy / calculatedheight)
bottom_y = float((posy + 480) / calculatedheight)
#self.response.out.write("ratio < 4/6 - " + str(left_x) + ", " + str(top_y) + ", " + str(right_x) + ", " + str(bottom_y))
#return 0
#posx: 0, posy: 5
#left_x: 0.0, top_y: 0.0104166666667, right_x: 1.01587301587, bottom_y: 1.01041666667
croppedcontent = images.crop(resizedcontent, left_x, top_y, right_x, bottom_y)
img = images.Image(croppedcontent)
myfile = File()
myfile.bytes_content = db.Blob(croppedcontent)
myfile.height = img.height
myfile.width = img.width
queryitem = File.all()
queryitem.order('-fileId')
items = queryitem.fetch(1)
if not len(items) > 0:
lastId = 0
else:
lastId = items[0].fileId
fileId = lastId + 1
myfile.fileId = fileId
if item == "man":
myfile.category = "man"
if item == "woman":
myfile.category = "woman"
if item == "funny":
myfile.category = "funny"
if item == "odd":
myfile.category = "odd"
myfile.put()
#self.response.out.write(str(left_x) + ", " + str(top_y) + ", " + str(right_x) + ", " + str(bottom_y))
self.response.out.write("/image/" + str(fileId) + "/")
class Users(BaseRequestHandler):
def get(self):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
template_values = {
}
self.generate('users.html', template_values)
class UploadImage(BaseRequestHandler):
def get(self, filekey, callback):
imagetype = self.request.get('imagetype')
template_values = {
'filekey': filekey,
'callback': callback,
'imagetype': imagetype
}
self.generate('fileupload.html', template_values)
def post(self, filekey, callback):
for upload in self.request.POST.getall('fileupload[]'):
#item = self.request.POST.get('fileupload')
imagetype = self.request.get('imagetype')
file_content = upload.file.read()
content = db.Blob(file_content)
myfile = File()
img = images.Image(content)
myfile.bytes_content = content
myfile.height = img.height
myfile.width = img.width
myfile.imagetype = imagetype
myfile.put()
template_values = {
'fileKey': str(myfile.key()),
'posted': True,
'callback': callback,
'imagetype': imagetype
}
self.generate('fileupload.html', template_values)
class DeleteImage(BaseRequestHandler):
def post(self, key):
logincontrol = LoginControl()
access = logincontrol.CheckLogin('admin',self)
if access==0:
return 0
myfile = File.get(key)
myfile.delete()
# Create first admin user to start working with the app.
class PopulateUsers(BaseRequestHandler):
def get(self):
user = CustomUser(name="Admin",
user=users.GetCurrentUser(),
role="admin")
user.put()
def main():
application = webapp.WSGIApplication([
('/admin/populateusers/', PopulateUsers),
('/admin/populateusers', PopulateUsers),
('/admin/upload/(.*)/(.*)/', UploadImage),
('/admin/image/delete/(.*)/', DeleteImage),
('/admin/blogs', Blogs),
('/admin/blogs/(.*)/(new)/', BlogAction),
('/admin/blogs/(.*)/(save)/', BlogAction),
('/admin/blogs/(.*)/(delete)/', BlogAction),
('/admin/blogs/(.*)/(edit)/', BlogAction),
('/admin/tiendas', Tiendas),
('/admin/tiendas/(.*)/(new)/', TiendaAction),
('/admin/tiendas/(.*)/(save)/', TiendaAction),
('/admin/tiendas/(.*)/(delete)/', TiendaAction),
('/admin/tiendas/(.*)/(edit)/', TiendaAction),
('/admin/libros', Libros),
('/admin/libros/(.*)/(new)/', LibroAction),
('/admin/libros/(.*)/(save)/', LibroAction),
('/admin/libros/(.*)/(delete)/', LibroAction),
('/admin/libros/(.*)/(edit)/', LibroAction),
('/admin/revistas', Revistas),
('/admin/revistas/(.*)/(new)/', RevistaAction),
('/admin/revistas/(.*)/(save)/', RevistaAction),
('/admin/revistas/(.*)/(delete)/', RevistaAction),
('/admin/revistas/(.*)/(edit)/', RevistaAction),
('/admin/tutoriales', Tutoriales),
('/admin/tutoriales/(.*)/(new)/', TutorialAction),
('/admin/tutoriales/(.*)/(save)/', TutorialAction),
('/admin/tutoriales/(.*)/(delete)/', TutorialAction),
('/admin/tutoriales/(.*)/(edit)/', TutorialAction),
('/admin/videos', Videos),
('/admin/videos/(.*)/(new)/', VideoAction),
('/admin/videos/(.*)/(save)/', VideoAction),
('/admin/videos/(.*)/(delete)/', VideoAction),
('/admin/videos/(.*)/(edit)/', VideoAction),
('/admin/cosas', Cosas),
('/admin/cosas/(.*)/(new)/', CosaAction),
('/admin/cosas/(.*)/(save)/', CosaAction),
('/admin/cosas/(.*)/(delete)/', CosaAction),
('/admin/cosas/(.*)/(edit)/', CosaAction),
('/admin/categorias', Categorias),
('/admin/categorias/(.*)/(new)/', CategoriaAction),
('/admin/categorias/(.*)/(save)/', CategoriaAction),
('/admin/categorias/(.*)/(delete)/', CategoriaAction),
('/admin/categorias/(.*)/(edit)/', CategoriaAction),
('/admin/tags', Tags),
('/admin/tags/(.*)/(new)/', TagAction),
('/admin/tags/(.*)/(save)/', TagAction),
('/admin/tags/(.*)/(delete)/', TagAction),
('/admin/tags/(.*)/(edit)/', TagAction),
('/admin/', Cosas),
], debug=_DEBUG)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == "__main__":
main()
| Python |
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import calendar
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta:
"""
The relativedelta type is based on the specification of the excellent
work done by M.-A. Lemburg in his mx.DateTime extension. However,
notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There are two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes:
relativedelta(datetime1, datetime2)
And the other way is to use the following keyword arguments:
year, month, day, hour, minute, second, microsecond:
Absolute information.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative.
weekday:
One of the weekday instances (MO, TU, etc). These instances may
receive a parameter N, specifying the Nth weekday, which could
be positive or negative (like MO(+1) or MO(-2)). Not specifying
it is the same as specifying +1. You can also use an integer,
where 0=MO.
leapdays:
Will add given days to the date found, if the year is a leap
year and the date found is after the 28th of February.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
Here is the behavior of operations with relativedelta:
1) Calculate the absolute year, using the 'year' argument, or the
original datetime year, if the argument is not present.
2) Add the relative 'years' argument to the absolute year.
3) Do steps 1 and 2 for month/months.
4) Calculate the absolute day, using the 'day' argument, or the
original datetime day, if the argument is not present. Then,
subtract from the day until it fits in the year and month
found after their operations.
5) Add the relative 'days' argument to the absolute day. Notice
that the 'weeks' argument is multiplied by 7 and added to
'days'.
6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
microsecond/microseconds.
7) If the 'weekday' argument is present, calculate the weekday,
with the given (wday, nth) tuple. wday is the index of the
weekday (0-6, 0=Mon), and nth is the number of weeks to add
forward or backward, depending on its signal. Notice that if
the calculated date is already Monday, for example, using
(0, 1) or (0, -1) won't change the day.
"""
def __init__(self, dt1=None, dt2=None,
years=0, months=0, days=0, leapdays=0, weeks=0,
hours=0, minutes=0, seconds=0, microseconds=0,
year=None, month=None, day=None, weekday=None,
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
if not isinstance(dt1, datetime.date) or \
not isinstance(dt2, datetime.date):
raise TypeError, "relativedelta only diffs datetime/date"
if type(dt1) is not type(dt2):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
dt2 = datetime.datetime.fromordinal(dt2.toordinal())
self.years = 0
self.months = 0
self.days = 0
self.leapdays = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.microseconds = 0
self.year = None
self.month = None
self.day = None
self.weekday = None
self.hour = None
self.minute = None
self.second = None
self.microsecond = None
self._has_time = 0
months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
self._set_months(months)
dtm = self.__radd__(dt2)
if dt1 < dt2:
while dt1 > dtm:
months += 1
self._set_months(months)
dtm = self.__radd__(dt2)
else:
while dt1 < dtm:
months -= 1
self._set_months(months)
dtm = self.__radd__(dt2)
delta = dt1 - dtm
self.seconds = delta.seconds+delta.days*86400
self.microseconds = delta.microseconds
else:
self.years = years
self.months = months
self.days = days+weeks*7
self.leapdays = leapdays
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.microseconds = microseconds
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
if type(weekday) is int:
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
yday = 0
if nlyearday:
yday = nlyearday
elif yearday:
yday = yearday
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31,59,90,120,151,181,212,243,273,304,334,366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
if idx == 0:
self.day = yday
else:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError, "invalid year day (%d)" % yday
self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = self.microseconds//abs(self.microseconds)
div, mod = divmod(self.microseconds*s, 1000000)
self.microseconds = mod*s
self.seconds += div*s
if abs(self.seconds) > 59:
s = self.seconds//abs(self.seconds)
div, mod = divmod(self.seconds*s, 60)
self.seconds = mod*s
self.minutes += div*s
if abs(self.minutes) > 59:
s = self.minutes//abs(self.minutes)
div, mod = divmod(self.minutes*s, 60)
self.minutes = mod*s
self.hours += div*s
if abs(self.hours) > 23:
s = self.hours//abs(self.hours)
div, mod = divmod(self.hours*s, 24)
self.hours = mod*s
self.days += div*s
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years += div*s
if (self.hours or self.minutes or self.seconds or self.microseconds or
self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years = div*s
else:
self.years = 0
def __radd__(self, other):
if not isinstance(other, datetime.date):
raise TypeError, "unsupported type for add operation"
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
month = self.month or other.month
if self.months:
assert 1 <= abs(self.months) <= 12
month += self.months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(calendar.monthrange(year, month)[1],
self.day or other.day)
repl = {"year": year, "month": month, "day": day}
for attr in ["hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
repl[attr] = value
days = self.days
if self.leapdays and month > 2 and calendar.isleap(year):
days += self.leapdays
ret = (other.replace(**repl)
+ datetime.timedelta(days=days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds))
if self.weekday:
weekday, nth = self.weekday.weekday, self.weekday.n or 1
jumpdays = (abs(nth)-1)*7
if nth > 0:
jumpdays += (7-ret.weekday()+weekday)%7
else:
jumpdays += (ret.weekday()-weekday)%7
jumpdays *= -1
ret += datetime.timedelta(days=jumpdays)
return ret
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __add__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for add operation"
return relativedelta(years=other.years+self.years,
months=other.months+self.months,
days=other.days+self.days,
hours=other.hours+self.hours,
minutes=other.minutes+self.minutes,
seconds=other.seconds+self.seconds,
microseconds=other.microseconds+self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.microsecond or self.microsecond)
def __sub__(self, other):
if not isinstance(other, relativedelta):
raise TypeError, "unsupported type for sub operation"
return relativedelta(years=other.years-self.years,
months=other.months-self.months,
days=other.days-self.days,
hours=other.hours-self.hours,
minutes=other.minutes-self.minutes,
seconds=other.seconds-self.seconds,
microseconds=other.microseconds-self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.microsecond or self.microsecond)
def __neg__(self):
return relativedelta(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __nonzero__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
def __mul__(self, other):
f = float(other)
return relativedelta(years=self.years*f,
months=self.months*f,
days=self.days*f,
hours=self.hours*f,
minutes=self.minutes*f,
seconds=self.seconds*f,
microseconds=self.microseconds*f,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __eq__(self, other):
if not isinstance(other, relativedelta):
return False
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
def __ne__(self, other):
return not self.__eq__(other)
def __div__(self, other):
return self.__mul__(1/float(other))
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("%s=%+d" % (attr, value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
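# Hedged usage sketch (doctest-style; the dates are illustrative):
#
#   >>> import datetime
#   >>> datetime.date(2003, 1, 31) + relativedelta(months=+1)
#   datetime.date(2003, 2, 28)
#   >>> datetime.date(2003, 1, 1) + relativedelta(weekday=FR)
#   datetime.date(2003, 1, 3)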
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
import struct
import time
import sys
import os
relativedelta = None
parser = None
rrule = None
__all__ = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "tzwin", "tzwinlocal", "gettz"]
try:
from dateutil.tzwin import tzwin, tzwinlocal
except (ImportError, OSError):
tzwin, tzwinlocal = None, None
ZERO = datetime.timedelta(0)
EPOCHORDINAL = datetime.datetime.utcfromtimestamp(0).toordinal()
class tzutc(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def dst(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def __eq__(self, other):
return (isinstance(other, tzutc) or
(isinstance(other, tzoffset) and other._offset == ZERO))
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class tzoffset(datetime.tzinfo):
def __init__(self, name, offset):
self._name = name
self._offset = datetime.timedelta(seconds=offset)
def utcoffset(self, dt):
return self._offset
def dst(self, dt):
return ZERO
def tzname(self, dt):
return self._name
def __eq__(self, other):
return (isinstance(other, tzoffset) and
self._offset == other._offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,
`self._name`,
self._offset.days*86400+self._offset.seconds)
__reduce__ = object.__reduce__
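# Hedged usage sketch (the offset value is illustrative):
#
#   >>> import datetime
#   >>> brst = tzoffset("BRST", -10800)  # UTC-3
#   >>> datetime.datetime(2003, 9, 27, 9, 0, tzinfo=brst).isoformat()
#   '2003-09-27T09:00:00-03:00'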
class tzlocal(datetime.tzinfo):
_std_offset = datetime.timedelta(seconds=-time.timezone)
if time.daylight:
_dst_offset = datetime.timedelta(seconds=-time.altzone)
else:
_dst_offset = _std_offset
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
return time.tzname[self._isdst(dt)]
def _isdst(self, dt):
# We can't use mktime here. It is unstable when deciding if
# the hour near to a change is DST or not.
#
# timestamp = time.mktime((dt.year, dt.month, dt.day, dt.hour,
# dt.minute, dt.second, dt.weekday(), 0, -1))
# return time.localtime(timestamp).tm_isdst
#
# The code above yields the following result:
#
#>>> import tz, datetime
#>>> t = tz.tzlocal()
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,16,0,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRST'
#>>> datetime.datetime(2003,2,15,22,tzinfo=t).tzname()
#'BRDT'
#>>> datetime.datetime(2003,2,15,23,tzinfo=t).tzname()
#'BRDT'
#
# Here is a more stable implementation:
#
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
return time.localtime(timestamp+time.timezone).tm_isdst
def __eq__(self, other):
if not isinstance(other, tzlocal):
return False
return (self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
__reduce__ = object.__reduce__
class _ttinfo(object):
__slots__ = ["offset", "delta", "isdst", "abbr", "isstd", "isgmt"]
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def __repr__(self):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
def __eq__(self, other):
if not isinstance(other, _ttinfo):
return False
return (self.offset == other.offset and
self.delta == other.delta and
self.isdst == other.isdst and
self.abbr == other.abbr and
self.isstd == other.isstd and
self.isgmt == other.isgmt)
def __ne__(self, other):
return not self.__eq__(other)
def __getstate__(self):
state = {}
for name in self.__slots__:
state[name] = getattr(self, name, None)
return state
def __setstate__(self, state):
for name in self.__slots__:
if name in state:
setattr(self, name, state[name])
class tzfile(datetime.tzinfo):
# http://www.twinsun.com/tz/tz-link.htm
# ftp://elsie.nci.nih.gov/pub/tz*.tar.gz
def __init__(self, fileobj):
if isinstance(fileobj, basestring):
self._filename = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._filename = fileobj.name
else:
self._filename = `fileobj`
# From tzfile(5):
#
# The time zone information files used by tzset(3)
# begin with the magic characters "TZif" to identify
# them as time zone information files, followed by
# sixteen bytes reserved for future use, followed by
# six four-byte values of type long, written in a
# ``standard'' byte order (the high-order byte
# of the value is written first).
if fileobj.read(4) != "TZif":
raise ValueError, "magic not found"
fileobj.read(16)
(
# The number of UTC/local indicators stored in the file.
ttisgmtcnt,
# The number of standard/wall indicators stored in the file.
ttisstdcnt,
# The number of leap seconds for which data is
# stored in the file.
leapcnt,
# The number of "transition times" for which data
# is stored in the file.
timecnt,
# The number of "local time types" for which data
# is stored in the file (must not be zero).
typecnt,
# The number of characters of "time zone
# abbreviation strings" stored in the file.
charcnt,
) = struct.unpack(">6l", fileobj.read(24))
# The above header is followed by tzh_timecnt four-byte
# values of type long, sorted in ascending order.
# These values are written in ``standard'' byte order.
# Each is used as a transition time (as returned by
# time(2)) at which the rules for computing local time
# change.
if timecnt:
self._trans_list = struct.unpack(">%dl" % timecnt,
fileobj.read(timecnt*4))
else:
self._trans_list = []
# Next come tzh_timecnt one-byte values of type unsigned
# char; each one tells which of the different types of
# ``local time'' types described in the file is associated
# with the same-indexed transition time. These values
# serve as indices into an array of ttinfo structures that
# appears next in the file.
if timecnt:
self._trans_idx = struct.unpack(">%dB" % timecnt,
fileobj.read(timecnt))
else:
self._trans_idx = []
# Each ttinfo structure is written as a four-byte value
# for tt_gmtoff of type long, in a standard byte
# order, followed by a one-byte value for tt_isdst
# and a one-byte value for tt_abbrind. In each
# structure, tt_gmtoff gives the number of
# seconds to be added to UTC, tt_isdst tells whether
# tm_isdst should be set by localtime(3), and
# tt_abbrind serves as an index into the array of
# time zone abbreviation characters that follow the
# ttinfo structure(s) in the file.
ttinfo = []
for i in range(typecnt):
ttinfo.append(struct.unpack(">lbb", fileobj.read(6)))
abbr = fileobj.read(charcnt)
# Then there are tzh_leapcnt pairs of four-byte
# values, written in standard byte order; the
# first value of each pair gives the time (as
# returned by time(2)) at which a leap second
# occurs; the second gives the total number of
# leap seconds to be applied after the given time.
# The pairs of values are sorted in ascending order
# by time.
# Not used, for now
if leapcnt:
leap = struct.unpack(">%dl" % (leapcnt*2),
fileobj.read(leapcnt*8))
# Then there are tzh_ttisstdcnt standard/wall
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as standard
# time or wall clock time, and are used when
# a time zone file is used in handling POSIX-style
# time zone environment variables.
if ttisstdcnt:
isstd = struct.unpack(">%db" % ttisstdcnt,
fileobj.read(ttisstdcnt))
# Finally, there are tzh_ttisgmtcnt UTC/local
# indicators, each stored as a one-byte value;
# they tell whether the transition times associated
# with local time types were specified as UTC or
# local time, and are used when a time zone file
# is used in handling POSIX-style time zone envi-
# ronment variables.
if ttisgmtcnt:
isgmt = struct.unpack(">%db" % ttisgmtcnt,
fileobj.read(ttisgmtcnt))
# ** Everything has been read **
# Build ttinfo list
self._ttinfo_list = []
for i in range(typecnt):
gmtoff, isdst, abbrind = ttinfo[i]
# Round to full-minutes if that's not the case. Python's
# datetime doesn't accept sub-minute timezones. Check
# http://python.org/sf/1447945 for some information.
gmtoff = (gmtoff+30)//60*60
tti = _ttinfo()
tti.offset = gmtoff
tti.delta = datetime.timedelta(seconds=gmtoff)
tti.isdst = isdst
tti.abbr = abbr[abbrind:abbr.find('\x00', abbrind)]
tti.isstd = (ttisstdcnt > i and isstd[i] != 0)
tti.isgmt = (ttisgmtcnt > i and isgmt[i] != 0)
self._ttinfo_list.append(tti)
# Replace ttinfo indexes for ttinfo objects.
trans_idx = []
for idx in self._trans_idx:
trans_idx.append(self._ttinfo_list[idx])
self._trans_idx = tuple(trans_idx)
# Set standard, dst, and before ttinfos. before will be
# used when a given time is before any transitions,
# and will be set to the first non-dst ttinfo, or to
# the first dst, if all of them are dst.
self._ttinfo_std = None
self._ttinfo_dst = None
self._ttinfo_before = None
if self._ttinfo_list:
if not self._trans_list:
self._ttinfo_std = self._ttinfo_first = self._ttinfo_list[0]
else:
for i in range(timecnt-1,-1,-1):
tti = self._trans_idx[i]
if not self._ttinfo_std and not tti.isdst:
self._ttinfo_std = tti
elif not self._ttinfo_dst and tti.isdst:
self._ttinfo_dst = tti
if self._ttinfo_std and self._ttinfo_dst:
break
else:
if self._ttinfo_dst and not self._ttinfo_std:
self._ttinfo_std = self._ttinfo_dst
for tti in self._ttinfo_list:
if not tti.isdst:
self._ttinfo_before = tti
break
else:
self._ttinfo_before = self._ttinfo_list[0]
# Now fix transition times to become relative to wall time.
#
# I'm not sure about this. In my tests, the tz source file
# is set up to wall time, and in the binary file isstd and
# isgmt are off, so it should be in wall time. OTOH, it's
# always in gmt time. Let me know if you have comments
# about this.
laststdoffset = 0
self._trans_list = list(self._trans_list)
for i in range(len(self._trans_list)):
tti = self._trans_idx[i]
if not tti.isdst:
# This is std time.
self._trans_list[i] += tti.offset
laststdoffset = tti.offset
else:
# This is dst time. Convert to std.
self._trans_list[i] += laststdoffset
self._trans_list = tuple(self._trans_list)
def _find_ttinfo(self, dt, laststd=0):
timestamp = ((dt.toordinal() - EPOCHORDINAL) * 86400
+ dt.hour * 3600
+ dt.minute * 60
+ dt.second)
idx = 0
for trans in self._trans_list:
if timestamp < trans:
break
idx += 1
else:
return self._ttinfo_std
if idx == 0:
return self._ttinfo_before
if laststd:
while idx > 0:
tti = self._trans_idx[idx-1]
if not tti.isdst:
return tti
idx -= 1
else:
return self._ttinfo_std
else:
return self._trans_idx[idx-1]
def utcoffset(self, dt):
if not self._ttinfo_std:
return ZERO
return self._find_ttinfo(dt).delta
def dst(self, dt):
if not self._ttinfo_dst:
return ZERO
tti = self._find_ttinfo(dt)
if not tti.isdst:
return ZERO
# The documentation says that utcoffset()-dst() must
# be constant for every dt.
return tti.delta-self._find_ttinfo(dt, laststd=1).delta
# An alternative for that would be:
#
# return self._ttinfo_dst.offset-self._ttinfo_std.offset
#
# However, this class stores historical changes in the
        # dst offset, so I believe that this wouldn't be the right
# way to implement this.
def tzname(self, dt):
if not self._ttinfo_std:
return None
return self._find_ttinfo(dt).abbr
def __eq__(self, other):
if not isinstance(other, tzfile):
return False
return (self._trans_list == other._trans_list and
self._trans_idx == other._trans_idx and
self._ttinfo_list == other._ttinfo_list)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._filename`)
def __reduce__(self):
if not os.path.isfile(self._filename):
raise ValueError, "Unpickable %s class" % self.__class__.__name__
return (self.__class__, (self._filename,))
class tzrange(datetime.tzinfo):
def __init__(self, stdabbr, stdoffset=None,
dstabbr=None, dstoffset=None,
start=None, end=None):
global relativedelta
if not relativedelta:
from dateutil import relativedelta
self._std_abbr = stdabbr
self._dst_abbr = dstabbr
if stdoffset is not None:
self._std_offset = datetime.timedelta(seconds=stdoffset)
else:
self._std_offset = ZERO
if dstoffset is not None:
self._dst_offset = datetime.timedelta(seconds=dstoffset)
elif dstabbr and stdoffset is not None:
self._dst_offset = self._std_offset+datetime.timedelta(hours=+1)
else:
self._dst_offset = ZERO
if dstabbr and start is None:
self._start_delta = relativedelta.relativedelta(
hours=+2, month=4, day=1, weekday=relativedelta.SU(+1))
else:
self._start_delta = start
if dstabbr and end is None:
self._end_delta = relativedelta.relativedelta(
hours=+1, month=10, day=31, weekday=relativedelta.SU(-1))
else:
self._end_delta = end
def utcoffset(self, dt):
if self._isdst(dt):
return self._dst_offset
else:
return self._std_offset
def dst(self, dt):
if self._isdst(dt):
return self._dst_offset-self._std_offset
else:
return ZERO
def tzname(self, dt):
if self._isdst(dt):
return self._dst_abbr
else:
return self._std_abbr
def _isdst(self, dt):
if not self._start_delta:
return False
year = datetime.datetime(dt.year,1,1)
start = year+self._start_delta
end = year+self._end_delta
dt = dt.replace(tzinfo=None)
if start < end:
return dt >= start and dt < end
else:
return dt >= start or dt < end
def __eq__(self, other):
if not isinstance(other, tzrange):
return False
return (self._std_abbr == other._std_abbr and
self._dst_abbr == other._dst_abbr and
self._std_offset == other._std_offset and
self._dst_offset == other._dst_offset and
self._start_delta == other._start_delta and
self._end_delta == other._end_delta)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s(...)" % self.__class__.__name__
__reduce__ = object.__reduce__
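# Illustrative sketch (values are examples, not taken from the original
# source): a tzrange built with the pre-2007 US default rules defined above.
#   tz = tzrange("EST", -18000, "EDT")
#   tz.utcoffset(datetime.datetime(2005, 7, 1))  # timedelta of -4 hours (DST)
#   tz.tzname(datetime.datetime(2005, 1, 15))    # "EST"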
class tzstr(tzrange):
def __init__(self, s):
global parser
if not parser:
from dateutil import parser
self._s = s
res = parser._parsetz(s)
if res is None:
raise ValueError, "unknown string format"
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC"):
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
def _delta(self, x, isend=0):
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset-self._std_offset
kwargs["seconds"] -= delta.seconds+delta.days*86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
class _tzicalvtzcomp:
def __init__(self, tzoffsetfrom, tzoffsetto, isdst,
tzname=None, rrule=None):
self.tzoffsetfrom = datetime.timedelta(seconds=tzoffsetfrom)
self.tzoffsetto = datetime.timedelta(seconds=tzoffsetto)
self.tzoffsetdiff = self.tzoffsetto-self.tzoffsetfrom
self.isdst = isdst
self.tzname = tzname
self.rrule = rrule
class _tzicalvtz(datetime.tzinfo):
def __init__(self, tzid, comps=[]):
self._tzid = tzid
self._comps = comps
self._cachedate = []
self._cachecomp = []
def _find_comp(self, dt):
if len(self._comps) == 1:
return self._comps[0]
dt = dt.replace(tzinfo=None)
try:
return self._cachecomp[self._cachedate.index(dt)]
except ValueError:
pass
lastcomp = None
lastcompdt = None
for comp in self._comps:
if not comp.isdst:
# Handle the extra hour in DST -> STD
compdt = comp.rrule.before(dt-comp.tzoffsetdiff, inc=True)
else:
compdt = comp.rrule.before(dt, inc=True)
if compdt and (not lastcompdt or lastcompdt < compdt):
lastcompdt = compdt
lastcomp = comp
if not lastcomp:
# RFC says nothing about what to do when a given
# time is before the first onset date. We'll look for the
# first standard component, or the first component, if
# none is found.
for comp in self._comps:
if not comp.isdst:
lastcomp = comp
break
else:
                lastcomp = self._comps[0]
self._cachedate.insert(0, dt)
self._cachecomp.insert(0, lastcomp)
if len(self._cachedate) > 10:
self._cachedate.pop()
self._cachecomp.pop()
return lastcomp
def utcoffset(self, dt):
return self._find_comp(dt).tzoffsetto
def dst(self, dt):
comp = self._find_comp(dt)
if comp.isdst:
return comp.tzoffsetdiff
else:
return ZERO
def tzname(self, dt):
return self._find_comp(dt).tzname
def __repr__(self):
return "<tzicalvtz %s>" % `self._tzid`
__reduce__ = object.__reduce__
class tzical:
def __init__(self, fileobj):
global rrule
if not rrule:
from dateutil import rrule
if isinstance(fileobj, basestring):
self._s = fileobj
fileobj = open(fileobj)
elif hasattr(fileobj, "name"):
self._s = fileobj.name
else:
            self._s = repr(fileobj)
self._vtz = {}
self._parse_rfc(fileobj.read())
def keys(self):
return self._vtz.keys()
def get(self, tzid=None):
if tzid is None:
keys = self._vtz.keys()
if len(keys) == 0:
raise ValueError, "no timezones defined"
elif len(keys) > 1:
raise ValueError, "more than one timezone available"
tzid = keys[0]
return self._vtz.get(tzid)
def _parse_offset(self, s):
s = s.strip()
if not s:
raise ValueError, "empty offset"
if s[0] in ('+', '-'):
signal = (-1,+1)[s[0]=='+']
s = s[1:]
else:
signal = +1
if len(s) == 4:
return (int(s[:2])*3600+int(s[2:])*60)*signal
elif len(s) == 6:
return (int(s[:2])*3600+int(s[2:4])*60+int(s[4:]))*signal
else:
raise ValueError, "invalid offset: "+s
def _parse_rfc(self, s):
lines = s.splitlines()
if not lines:
raise ValueError, "empty string"
# Unfold
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
tzid = None
comps = []
invtz = False
comptype = None
for line in lines:
if not line:
continue
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0].upper()
parms = parms[1:]
if invtz:
if name == "BEGIN":
if value in ("STANDARD", "DAYLIGHT"):
# Process component
pass
else:
raise ValueError, "unknown component: "+value
comptype = value
founddtstart = False
tzoffsetfrom = None
tzoffsetto = None
rrulelines = []
tzname = None
elif name == "END":
if value == "VTIMEZONE":
if comptype:
raise ValueError, \
"component not closed: "+comptype
if not tzid:
raise ValueError, \
"mandatory TZID not found"
if not comps:
raise ValueError, \
"at least one component is needed"
# Process vtimezone
self._vtz[tzid] = _tzicalvtz(tzid, comps)
invtz = False
elif value == comptype:
if not founddtstart:
raise ValueError, \
"mandatory DTSTART not found"
if tzoffsetfrom is None:
raise ValueError, \
"mandatory TZOFFSETFROM not found"
                        if tzoffsetto is None:
                            raise ValueError, \
                                  "mandatory TZOFFSETTO not found"
# Process component
rr = None
if rrulelines:
rr = rrule.rrulestr("\n".join(rrulelines),
compatible=True,
ignoretz=True,
cache=True)
comp = _tzicalvtzcomp(tzoffsetfrom, tzoffsetto,
(comptype == "DAYLIGHT"),
tzname, rr)
comps.append(comp)
comptype = None
else:
raise ValueError, \
"invalid component end: "+value
elif comptype:
if name == "DTSTART":
rrulelines.append(line)
founddtstart = True
elif name in ("RRULE", "RDATE", "EXRULE", "EXDATE"):
rrulelines.append(line)
elif name == "TZOFFSETFROM":
if parms:
raise ValueError, \
"unsupported %s parm: %s "%(name, parms[0])
tzoffsetfrom = self._parse_offset(value)
elif name == "TZOFFSETTO":
if parms:
raise ValueError, \
"unsupported TZOFFSETTO parm: "+parms[0]
tzoffsetto = self._parse_offset(value)
elif name == "TZNAME":
if parms:
raise ValueError, \
"unsupported TZNAME parm: "+parms[0]
tzname = value
elif name == "COMMENT":
pass
else:
raise ValueError, "unsupported property: "+name
else:
if name == "TZID":
if parms:
raise ValueError, \
"unsupported TZID parm: "+parms[0]
tzid = value
elif name in ("TZURL", "LAST-MODIFIED", "COMMENT"):
pass
else:
raise ValueError, "unsupported property: "+name
elif name == "BEGIN" and value == "VTIMEZONE":
tzid = None
comps = []
invtz = True
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, `self._s`)
if sys.platform != "win32":
TZFILES = ["/etc/localtime", "localtime"]
TZPATHS = ["/usr/share/zoneinfo", "/usr/lib/zoneinfo", "/etc/zoneinfo"]
else:
TZFILES = []
TZPATHS = []
def gettz(name=None):
tz = None
if not name:
try:
name = os.environ["TZ"]
except KeyError:
pass
if name is None or name == ":":
for filepath in TZFILES:
if not os.path.isabs(filepath):
filename = filepath
for path in TZPATHS:
filepath = os.path.join(path, filename)
if os.path.isfile(filepath):
break
else:
continue
if os.path.isfile(filepath):
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = tzlocal()
else:
if name.startswith(":"):
name = name[:-1]
if os.path.isabs(name):
if os.path.isfile(name):
tz = tzfile(name)
else:
tz = None
else:
for path in TZPATHS:
filepath = os.path.join(path, name)
if not os.path.isfile(filepath):
filepath = filepath.replace(' ','_')
if not os.path.isfile(filepath):
continue
try:
tz = tzfile(filepath)
break
except (IOError, OSError, ValueError):
pass
else:
tz = None
if tzwin:
try:
tz = tzwin(name)
except OSError:
pass
if not tz:
from dateutil.zoneinfo import gettz
tz = gettz(name)
if not tz:
for c in name:
# name must have at least one offset to be a tzstr
if c in "0123456789":
try:
tz = tzstr(name)
except ValueError:
pass
break
else:
if name in ("GMT", "UTC"):
tz = tzutc()
elif name in time.tzname:
tz = tzlocal()
return tz
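# Hedged usage sketch (not part of the original module). It assumes this file
# is importable as dateutil.tz and that the host provides an Olson database;
# gettz() may legitimately return None otherwise.
if __name__ == "__main__":
    import datetime
    eastern = gettz("America/New_York")
    if eastern is not None:
        now = datetime.datetime.now(eastern)
        print now.tzname(), now.utcoffset()
    # tzstr parses POSIX-style TZ strings directly:
    est = tzstr("EST5EDT")
    print est.tzname(datetime.datetime(2010, 7, 1))  # "EDT" (DST in effect)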
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import itertools
import datetime
import calendar
import thread
import sys
__all__ = ["rrule", "rruleset", "rrulestr",
"YEARLY", "MONTHLY", "WEEKLY", "DAILY",
"HOURLY", "MINUTELY", "SECONDLY",
"MO", "TU", "WE", "TH", "FR", "SA", "SU"]
# Every mask is 7 days longer to handle cross-year weekly periods.
M366MASK = tuple([1]*31+[2]*29+[3]*31+[4]*30+[5]*31+[6]*30+
[7]*31+[8]*31+[9]*30+[10]*31+[11]*30+[12]*31+[1]*7)
M365MASK = list(M366MASK)
M29, M30, M31 = range(1,30), range(1,31), range(1,32)
MDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
MDAY365MASK = list(MDAY366MASK)
M29, M30, M31 = range(-29,0), range(-30,0), range(-31,0)
NMDAY366MASK = tuple(M31+M29+M31+M30+M31+M30+M31+M31+M30+M31+M30+M31+M31[:7])
NMDAY365MASK = list(NMDAY366MASK)
M366RANGE = (0,31,60,91,121,152,182,213,244,274,305,335,366)
M365RANGE = (0,31,59,90,120,151,181,212,243,273,304,334,365)
WDAYMASK = [0,1,2,3,4,5,6]*55
del M29, M30, M31, M365MASK[59], MDAY365MASK[59], NMDAY365MASK[31]
MDAY365MASK = tuple(MDAY365MASK)
M365MASK = tuple(M365MASK)
(YEARLY,
MONTHLY,
WEEKLY,
DAILY,
HOURLY,
MINUTELY,
SECONDLY) = range(7)
# Imported on demand.
easter = None
parser = None
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
if n == 0:
raise ValueError, "Can't create weekday with n == 0"
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
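# Sketch of how the weekday singletons behave (not part of the original):
#   TU      -> plain Tuesday, usable as byweekday=TU
#   TU(+2)  -> "second Tuesday", meaningful for MONTHLY/YEARLY frequencies
#   repr(TU(+2)) == "TU(+2)"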
class rrulebase:
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = thread.allocate_lock()
self._cache_gen = self._iter()
self._cache_complete = False
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(gen.next())
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxint,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = gen.next()
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
    # __len__() introduces a large performance penalty.
def count(self):
if self._len is None:
for x in self: pass
return self._len
def before(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def between(self, after, before, inc=False):
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
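    # Usage sketch for the helpers above (illustrative): given a rule r,
    #   r.before(dt)    -> last occurrence strictly before dt
    #   r.after(dt)     -> first occurrence strictly after dt
    #   r.between(a, b) -> occurrences with a < occurrence < b
    # Passing inc=True makes the endpoint comparisons inclusive.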
class rrule(rrulebase):
def __init__(self, freq, dtstart=None,
interval=1, wkst=None, count=None, until=None, bysetpos=None,
bymonth=None, bymonthday=None, byyearday=None, byeaster=None,
byweekno=None, byweekday=None,
byhour=None, byminute=None, bysecond=None,
cache=False):
rrulebase.__init__(self, cache)
global easter
if not dtstart:
dtstart = datetime.datetime.now().replace(microsecond=0)
elif not isinstance(dtstart, datetime.datetime):
dtstart = datetime.datetime.fromordinal(dtstart.toordinal())
else:
dtstart = dtstart.replace(microsecond=0)
self._dtstart = dtstart
self._tzinfo = dtstart.tzinfo
self._freq = freq
self._interval = interval
self._count = count
if until and not isinstance(until, datetime.datetime):
until = datetime.datetime.fromordinal(until.toordinal())
self._until = until
if wkst is None:
self._wkst = calendar.firstweekday()
elif type(wkst) is int:
self._wkst = wkst
else:
self._wkst = wkst.weekday
if bysetpos is None:
self._bysetpos = None
elif type(bysetpos) is int:
if bysetpos == 0 or not (-366 <= bysetpos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
self._bysetpos = (bysetpos,)
else:
self._bysetpos = tuple(bysetpos)
for pos in self._bysetpos:
if pos == 0 or not (-366 <= pos <= 366):
raise ValueError("bysetpos must be between 1 and 366, "
"or between -366 and -1")
if not (byweekno or byyearday or bymonthday or
byweekday is not None or byeaster is not None):
if freq == YEARLY:
if not bymonth:
bymonth = dtstart.month
bymonthday = dtstart.day
elif freq == MONTHLY:
bymonthday = dtstart.day
elif freq == WEEKLY:
byweekday = dtstart.weekday()
# bymonth
if not bymonth:
self._bymonth = None
elif type(bymonth) is int:
self._bymonth = (bymonth,)
else:
self._bymonth = tuple(bymonth)
# byyearday
if not byyearday:
self._byyearday = None
elif type(byyearday) is int:
self._byyearday = (byyearday,)
else:
self._byyearday = tuple(byyearday)
# byeaster
if byeaster is not None:
if not easter:
from dateutil import easter
if type(byeaster) is int:
self._byeaster = (byeaster,)
else:
self._byeaster = tuple(byeaster)
else:
self._byeaster = None
        # bymonthday
if not bymonthday:
self._bymonthday = ()
self._bynmonthday = ()
elif type(bymonthday) is int:
if bymonthday < 0:
self._bynmonthday = (bymonthday,)
self._bymonthday = ()
else:
self._bymonthday = (bymonthday,)
self._bynmonthday = ()
else:
self._bymonthday = tuple([x for x in bymonthday if x > 0])
self._bynmonthday = tuple([x for x in bymonthday if x < 0])
# byweekno
if byweekno is None:
self._byweekno = None
elif type(byweekno) is int:
self._byweekno = (byweekno,)
else:
self._byweekno = tuple(byweekno)
# byweekday / bynweekday
if byweekday is None:
self._byweekday = None
self._bynweekday = None
elif type(byweekday) is int:
self._byweekday = (byweekday,)
self._bynweekday = None
elif hasattr(byweekday, "n"):
if not byweekday.n or freq > MONTHLY:
self._byweekday = (byweekday.weekday,)
self._bynweekday = None
else:
self._bynweekday = ((byweekday.weekday, byweekday.n),)
self._byweekday = None
else:
self._byweekday = []
self._bynweekday = []
for wday in byweekday:
if type(wday) is int:
self._byweekday.append(wday)
elif not wday.n or freq > MONTHLY:
self._byweekday.append(wday.weekday)
else:
self._bynweekday.append((wday.weekday, wday.n))
self._byweekday = tuple(self._byweekday)
self._bynweekday = tuple(self._bynweekday)
if not self._byweekday:
self._byweekday = None
elif not self._bynweekday:
self._bynweekday = None
# byhour
if byhour is None:
if freq < HOURLY:
self._byhour = (dtstart.hour,)
else:
self._byhour = None
elif type(byhour) is int:
self._byhour = (byhour,)
else:
self._byhour = tuple(byhour)
# byminute
if byminute is None:
if freq < MINUTELY:
self._byminute = (dtstart.minute,)
else:
self._byminute = None
elif type(byminute) is int:
self._byminute = (byminute,)
else:
self._byminute = tuple(byminute)
# bysecond
if bysecond is None:
if freq < SECONDLY:
self._bysecond = (dtstart.second,)
else:
self._bysecond = None
elif type(bysecond) is int:
self._bysecond = (bysecond,)
else:
self._bysecond = tuple(bysecond)
if self._freq >= HOURLY:
self._timeset = None
else:
self._timeset = []
for hour in self._byhour:
for minute in self._byminute:
for second in self._bysecond:
self._timeset.append(
datetime.time(hour, minute, second,
tzinfo=self._tzinfo))
self._timeset.sort()
self._timeset = tuple(self._timeset)
def _iter(self):
year, month, day, hour, minute, second, weekday, yearday, _ = \
self._dtstart.timetuple()
# Some local variables to speed things up a bit
freq = self._freq
interval = self._interval
wkst = self._wkst
until = self._until
bymonth = self._bymonth
byweekno = self._byweekno
byyearday = self._byyearday
byweekday = self._byweekday
byeaster = self._byeaster
bymonthday = self._bymonthday
bynmonthday = self._bynmonthday
bysetpos = self._bysetpos
byhour = self._byhour
byminute = self._byminute
bysecond = self._bysecond
ii = _iterinfo(self)
ii.rebuild(year, month)
getdayset = {YEARLY:ii.ydayset,
MONTHLY:ii.mdayset,
WEEKLY:ii.wdayset,
DAILY:ii.ddayset,
HOURLY:ii.ddayset,
MINUTELY:ii.ddayset,
SECONDLY:ii.ddayset}[freq]
if freq < HOURLY:
timeset = self._timeset
else:
gettimeset = {HOURLY:ii.htimeset,
MINUTELY:ii.mtimeset,
SECONDLY:ii.stimeset}[freq]
if ((freq >= HOURLY and
self._byhour and hour not in self._byhour) or
(freq >= MINUTELY and
self._byminute and minute not in self._byminute) or
(freq >= SECONDLY and
self._bysecond and second not in self._bysecond)):
timeset = ()
else:
timeset = gettimeset(hour, minute, second)
total = 0
count = self._count
while True:
# Get dayset with the right frequency
dayset, start, end = getdayset(year, month, day)
# Do the "hard" work ;-)
filtered = False
for i in dayset[start:end]:
if ((bymonth and ii.mmask[i] not in bymonth) or
(byweekno and not ii.wnomask[i]) or
(byweekday and ii.wdaymask[i] not in byweekday) or
(ii.nwdaymask and not ii.nwdaymask[i]) or
(byeaster and not ii.eastermask[i]) or
((bymonthday or bynmonthday) and
ii.mdaymask[i] not in bymonthday and
ii.nmdaymask[i] not in bynmonthday) or
(byyearday and
((i < ii.yearlen and i+1 not in byyearday
and -ii.yearlen+i not in byyearday) or
(i >= ii.yearlen and i+1-ii.yearlen not in byyearday
and -ii.nextyearlen+i-ii.yearlen
not in byyearday)))):
dayset[i] = None
filtered = True
# Output results
if bysetpos and timeset:
poslist = []
for pos in bysetpos:
if pos < 0:
daypos, timepos = divmod(pos, len(timeset))
else:
daypos, timepos = divmod(pos-1, len(timeset))
try:
i = [x for x in dayset[start:end]
if x is not None][daypos]
time = timeset[timepos]
except IndexError:
pass
else:
date = datetime.date.fromordinal(ii.yearordinal+i)
res = datetime.datetime.combine(date, time)
if res not in poslist:
poslist.append(res)
poslist.sort()
for res in poslist:
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
else:
for i in dayset[start:end]:
if i is not None:
date = datetime.date.fromordinal(ii.yearordinal+i)
for time in timeset:
res = datetime.datetime.combine(date, time)
if until and res > until:
self._len = total
return
elif res >= self._dtstart:
total += 1
yield res
if count:
count -= 1
if not count:
self._len = total
return
# Handle frequency and interval
fixday = False
if freq == YEARLY:
year += interval
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == MONTHLY:
month += interval
if month > 12:
div, mod = divmod(month, 12)
month = mod
year += div
if month == 0:
month = 12
year -= 1
if year > datetime.MAXYEAR:
self._len = total
return
ii.rebuild(year, month)
elif freq == WEEKLY:
if wkst > weekday:
day += -(weekday+1+(6-wkst))+self._interval*7
else:
day += -(weekday-wkst)+self._interval*7
weekday = wkst
fixday = True
elif freq == DAILY:
day += interval
fixday = True
elif freq == HOURLY:
if filtered:
# Jump to one iteration before next day
hour += ((23-hour)//interval)*interval
while True:
hour += interval
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if not byhour or hour in byhour:
break
timeset = gettimeset(hour, minute, second)
elif freq == MINUTELY:
if filtered:
# Jump to one iteration before next day
minute += ((1439-(hour*60+minute))//interval)*interval
while True:
minute += interval
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
filtered = False
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute)):
break
timeset = gettimeset(hour, minute, second)
elif freq == SECONDLY:
if filtered:
# Jump to one iteration before next day
second += (((86399-(hour*3600+minute*60+second))
//interval)*interval)
while True:
second += self._interval
div, mod = divmod(second, 60)
if div:
second = mod
minute += div
div, mod = divmod(minute, 60)
if div:
minute = mod
hour += div
div, mod = divmod(hour, 24)
if div:
hour = mod
day += div
fixday = True
if ((not byhour or hour in byhour) and
(not byminute or minute in byminute) and
(not bysecond or second in bysecond)):
break
timeset = gettimeset(hour, minute, second)
if fixday and day > 28:
daysinmonth = calendar.monthrange(year, month)[1]
if day > daysinmonth:
while day > daysinmonth:
day -= daysinmonth
month += 1
if month == 13:
month = 1
year += 1
if year > datetime.MAXYEAR:
self._len = total
return
daysinmonth = calendar.monthrange(year, month)[1]
ii.rebuild(year, month)
class _iterinfo(object):
__slots__ = ["rrule", "lastyear", "lastmonth",
"yearlen", "nextyearlen", "yearordinal", "yearweekday",
"mmask", "mrange", "mdaymask", "nmdaymask",
"wdaymask", "wnomask", "nwdaymask", "eastermask"]
def __init__(self, rrule):
for attr in self.__slots__:
setattr(self, attr, None)
self.rrule = rrule
def rebuild(self, year, month):
# Every mask is 7 days longer to handle cross-year weekly periods.
rr = self.rrule
if year != self.lastyear:
self.yearlen = 365+calendar.isleap(year)
self.nextyearlen = 365+calendar.isleap(year+1)
firstyday = datetime.date(year, 1, 1)
self.yearordinal = firstyday.toordinal()
self.yearweekday = firstyday.weekday()
wday = datetime.date(year, 1, 1).weekday()
if self.yearlen == 365:
self.mmask = M365MASK
self.mdaymask = MDAY365MASK
self.nmdaymask = NMDAY365MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M365RANGE
else:
self.mmask = M366MASK
self.mdaymask = MDAY366MASK
self.nmdaymask = NMDAY366MASK
self.wdaymask = WDAYMASK[wday:]
self.mrange = M366RANGE
if not rr._byweekno:
self.wnomask = None
else:
self.wnomask = [0]*(self.yearlen+7)
#no1wkst = firstwkst = self.wdaymask.index(rr._wkst)
no1wkst = firstwkst = (7-self.yearweekday+rr._wkst)%7
if no1wkst >= 4:
no1wkst = 0
# Number of days in the year, plus the days we got
# from last year.
wyearlen = self.yearlen+(self.yearweekday-rr._wkst)%7
else:
# Number of days in the year, minus the days we
# left in last year.
wyearlen = self.yearlen-no1wkst
div, mod = divmod(wyearlen, 7)
numweeks = div+mod//4
for n in rr._byweekno:
if n < 0:
n += numweeks+1
if not (0 < n <= numweeks):
continue
if n > 1:
i = no1wkst+(n-1)*7
if no1wkst != firstwkst:
i -= 7-firstwkst
else:
i = no1wkst
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if 1 in rr._byweekno:
# Check week number 1 of next year as well
# TODO: Check -numweeks for next year.
i = no1wkst+numweeks*7
if no1wkst != firstwkst:
i -= 7-firstwkst
if i < self.yearlen:
# If week starts in next year, we
# don't care about it.
for j in range(7):
self.wnomask[i] = 1
i += 1
if self.wdaymask[i] == rr._wkst:
break
if no1wkst:
# Check last week number of last year as
# well. If no1wkst is 0, either the year
# started on week start, or week number 1
# got days from last year, so there are no
# days from last year's last week number in
# this year.
if -1 not in rr._byweekno:
lyearweekday = datetime.date(year-1,1,1).weekday()
lno1wkst = (7-lyearweekday+rr._wkst)%7
lyearlen = 365+calendar.isleap(year-1)
if lno1wkst >= 4:
lno1wkst = 0
lnumweeks = 52+(lyearlen+
(lyearweekday-rr._wkst)%7)%7//4
else:
lnumweeks = 52+(self.yearlen-no1wkst)%7//4
else:
lnumweeks = -1
if lnumweeks in rr._byweekno:
for i in range(no1wkst):
self.wnomask[i] = 1
if (rr._bynweekday and
(month != self.lastmonth or year != self.lastyear)):
ranges = []
if rr._freq == YEARLY:
if rr._bymonth:
for month in rr._bymonth:
ranges.append(self.mrange[month-1:month+1])
else:
ranges = [(0, self.yearlen)]
elif rr._freq == MONTHLY:
ranges = [self.mrange[month-1:month+1]]
if ranges:
# Weekly frequency won't get here, so we may not
# care about cross-year weekly periods.
self.nwdaymask = [0]*self.yearlen
for first, last in ranges:
last -= 1
for wday, n in rr._bynweekday:
if n < 0:
i = last+(n+1)*7
i -= (self.wdaymask[i]-wday)%7
else:
i = first+(n-1)*7
i += (7-self.wdaymask[i]+wday)%7
if first <= i <= last:
self.nwdaymask[i] = 1
if rr._byeaster:
self.eastermask = [0]*(self.yearlen+7)
eyday = easter.easter(year).toordinal()-self.yearordinal
for offset in rr._byeaster:
self.eastermask[eyday+offset] = 1
self.lastyear = year
self.lastmonth = month
def ydayset(self, year, month, day):
return range(self.yearlen), 0, self.yearlen
def mdayset(self, year, month, day):
set = [None]*self.yearlen
start, end = self.mrange[month-1:month+1]
for i in range(start, end):
set[i] = i
return set, start, end
def wdayset(self, year, month, day):
# We need to handle cross-year weeks here.
set = [None]*(self.yearlen+7)
i = datetime.date(year, month, day).toordinal()-self.yearordinal
start = i
for j in range(7):
set[i] = i
i += 1
#if (not (0 <= i < self.yearlen) or
# self.wdaymask[i] == self.rrule._wkst):
# This will cross the year boundary, if necessary.
if self.wdaymask[i] == self.rrule._wkst:
break
return set, start, i
def ddayset(self, year, month, day):
set = [None]*self.yearlen
i = datetime.date(year, month, day).toordinal()-self.yearordinal
set[i] = i
return set, i, i+1
def htimeset(self, hour, minute, second):
set = []
rr = self.rrule
for minute in rr._byminute:
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second,
tzinfo=rr._tzinfo))
set.sort()
return set
def mtimeset(self, hour, minute, second):
set = []
rr = self.rrule
for second in rr._bysecond:
set.append(datetime.time(hour, minute, second, tzinfo=rr._tzinfo))
set.sort()
return set
def stimeset(self, hour, minute, second):
return (datetime.time(hour, minute, second,
tzinfo=self.rrule._tzinfo),)
class rruleset(rrulebase):
class _genitem:
def __init__(self, genlist, gen):
try:
self.dt = gen()
genlist.append(self)
except StopIteration:
pass
self.genlist = genlist
self.gen = gen
def next(self):
try:
self.dt = self.gen()
except StopIteration:
self.genlist.remove(self)
def __cmp__(self, other):
return cmp(self.dt, other.dt)
def __init__(self, cache=False):
rrulebase.__init__(self, cache)
self._rrule = []
self._rdate = []
self._exrule = []
self._exdate = []
def rrule(self, rrule):
self._rrule.append(rrule)
def rdate(self, rdate):
self._rdate.append(rdate)
def exrule(self, exrule):
self._exrule.append(exrule)
def exdate(self, exdate):
self._exdate.append(exdate)
def _iter(self):
rlist = []
self._rdate.sort()
self._genitem(rlist, iter(self._rdate).next)
for gen in [iter(x).next for x in self._rrule]:
self._genitem(rlist, gen)
rlist.sort()
exlist = []
self._exdate.sort()
self._genitem(exlist, iter(self._exdate).next)
for gen in [iter(x).next for x in self._exrule]:
self._genitem(exlist, gen)
exlist.sort()
lastdt = None
total = 0
while rlist:
ritem = rlist[0]
if not lastdt or lastdt != ritem.dt:
while exlist and exlist[0] < ritem:
exlist[0].next()
exlist.sort()
if not exlist or ritem != exlist[0]:
total += 1
yield ritem.dt
lastdt = ritem.dt
ritem.next()
rlist.sort()
self._len = total
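# Combining sources in an rruleset (an illustrative sketch; some_dt is a
# placeholder datetime):
#   rs = rruleset()
#   rs.rrule(rrule(DAILY, count=7, dtstart=some_dt))
#   rs.exdate(some_dt)   # drop the first occurrence
#   len(list(rs))        # -> 6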
class _rrulestr:
_freq_map = {"YEARLY": YEARLY,
"MONTHLY": MONTHLY,
"WEEKLY": WEEKLY,
"DAILY": DAILY,
"HOURLY": HOURLY,
"MINUTELY": MINUTELY,
"SECONDLY": SECONDLY}
_weekday_map = {"MO":0,"TU":1,"WE":2,"TH":3,"FR":4,"SA":5,"SU":6}
def _handle_int(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = int(value)
def _handle_int_list(self, rrkwargs, name, value, **kwargs):
rrkwargs[name.lower()] = [int(x) for x in value.split(',')]
_handle_INTERVAL = _handle_int
_handle_COUNT = _handle_int
_handle_BYSETPOS = _handle_int_list
_handle_BYMONTH = _handle_int_list
_handle_BYMONTHDAY = _handle_int_list
_handle_BYYEARDAY = _handle_int_list
_handle_BYEASTER = _handle_int_list
_handle_BYWEEKNO = _handle_int_list
_handle_BYHOUR = _handle_int_list
_handle_BYMINUTE = _handle_int_list
_handle_BYSECOND = _handle_int_list
def _handle_FREQ(self, rrkwargs, name, value, **kwargs):
rrkwargs["freq"] = self._freq_map[value]
def _handle_UNTIL(self, rrkwargs, name, value, **kwargs):
global parser
if not parser:
from dateutil import parser
try:
rrkwargs["until"] = parser.parse(value,
ignoretz=kwargs.get("ignoretz"),
tzinfos=kwargs.get("tzinfos"))
except ValueError:
raise ValueError, "invalid until date"
def _handle_WKST(self, rrkwargs, name, value, **kwargs):
rrkwargs["wkst"] = self._weekday_map[value]
    def _handle_BYWEEKDAY(self, rrkwargs, name, value, **kwargs):
l = []
for wday in value.split(','):
for i in range(len(wday)):
if wday[i] not in '+-0123456789':
break
n = wday[:i] or None
w = wday[i:]
if n: n = int(n)
l.append(weekdays[self._weekday_map[w]](n))
rrkwargs["byweekday"] = l
_handle_BYDAY = _handle_BYWEEKDAY
def _parse_rfc_rrule(self, line,
dtstart=None,
cache=False,
ignoretz=False,
tzinfos=None):
if line.find(':') != -1:
name, value = line.split(':')
if name != "RRULE":
raise ValueError, "unknown parameter name"
else:
value = line
rrkwargs = {}
for pair in value.split(';'):
name, value = pair.split('=')
name = name.upper()
value = value.upper()
try:
getattr(self, "_handle_"+name)(rrkwargs, name, value,
ignoretz=ignoretz,
tzinfos=tzinfos)
except AttributeError:
raise ValueError, "unknown parameter '%s'" % name
except (KeyError, ValueError):
raise ValueError, "invalid '%s': %s" % (name, value)
return rrule(dtstart=dtstart, cache=cache, **rrkwargs)
def _parse_rfc(self, s,
dtstart=None,
cache=False,
unfold=False,
forceset=False,
compatible=False,
ignoretz=False,
tzinfos=None):
global parser
if compatible:
forceset = True
unfold = True
s = s.upper()
if not s.strip():
raise ValueError, "empty string"
if unfold:
lines = s.splitlines()
i = 0
while i < len(lines):
line = lines[i].rstrip()
if not line:
del lines[i]
elif i > 0 and line[0] == " ":
lines[i-1] += line[1:]
del lines[i]
else:
i += 1
else:
lines = s.split()
if (not forceset and len(lines) == 1 and
(s.find(':') == -1 or s.startswith('RRULE:'))):
return self._parse_rfc_rrule(lines[0], cache=cache,
dtstart=dtstart, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
rrulevals = []
rdatevals = []
exrulevals = []
exdatevals = []
for line in lines:
if not line:
continue
if line.find(':') == -1:
name = "RRULE"
value = line
else:
name, value = line.split(':', 1)
parms = name.split(';')
if not parms:
raise ValueError, "empty property name"
name = parms[0]
parms = parms[1:]
if name == "RRULE":
for parm in parms:
raise ValueError, "unsupported RRULE parm: "+parm
rrulevals.append(value)
elif name == "RDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
rdatevals.append(value)
elif name == "EXRULE":
for parm in parms:
raise ValueError, "unsupported EXRULE parm: "+parm
exrulevals.append(value)
elif name == "EXDATE":
for parm in parms:
if parm != "VALUE=DATE-TIME":
raise ValueError, "unsupported RDATE parm: "+parm
exdatevals.append(value)
elif name == "DTSTART":
for parm in parms:
raise ValueError, "unsupported DTSTART parm: "+parm
if not parser:
from dateutil import parser
dtstart = parser.parse(value, ignoretz=ignoretz,
tzinfos=tzinfos)
else:
raise ValueError, "unsupported property: "+name
if (forceset or len(rrulevals) > 1 or
rdatevals or exrulevals or exdatevals):
if not parser and (rdatevals or exdatevals):
from dateutil import parser
set = rruleset(cache=cache)
for value in rrulevals:
set.rrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in rdatevals:
for datestr in value.split(','):
set.rdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exrulevals:
set.exrule(self._parse_rfc_rrule(value, dtstart=dtstart,
ignoretz=ignoretz,
tzinfos=tzinfos))
for value in exdatevals:
for datestr in value.split(','):
set.exdate(parser.parse(datestr,
ignoretz=ignoretz,
tzinfos=tzinfos))
if compatible and dtstart:
set.rdate(dtstart)
return set
else:
return self._parse_rfc_rrule(rrulevals[0],
dtstart=dtstart,
cache=cache,
ignoretz=ignoretz,
tzinfos=tzinfos)
def __call__(self, s, **kwargs):
return self._parse_rfc(s, **kwargs)
rrulestr = _rrulestr()
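# Hedged usage sketch (not part of the original module), assuming this file is
# importable as dateutil.rrule:
if __name__ == "__main__":
    import datetime
    # Every other Tuesday at 09:00, five occurrences, from 2010-01-05.
    r = rrule(WEEKLY, interval=2, byweekday=TU, count=5,
              dtstart=datetime.datetime(2010, 1, 5, 9, 0))
    for dt in r:
        print dt
    # The same rule expressed as an RFC 2445 string:
    r2 = rrulestr("RRULE:FREQ=WEEKLY;INTERVAL=2;BYDAY=TU;COUNT=5",
                  dtstart=datetime.datetime(2010, 1, 5, 9, 0))
    print list(r) == list(r2)  # expected: True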
# vim:ts=4:sw=4:et
| Python |
"""
Copyright (c) 2003-2007 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Oudin (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError, "invalid method"
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g+15)%30
j = (y+y//4+i)%7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e+y//100-16-(y//100-16)//4
else:
# New method
c = y//100
h = (c-c//4-(8*c+13)//25+19*g+15)%30
i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
j = (y+y//4+i+2-c+c//4)%7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y),int(m),int(d))
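# A small illustrative check (not part of the original module): Western
# Easter dates for a few years, computed with the default method.
if __name__ == "__main__":
    for y in (2008, 2009, 2010):
        print y, easter(y)
    # Expected: 2008-03-23, 2009-04-12, 2010-04-04.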
| Python |
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__version__ = "1.5"
| Python |
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard python 2.3+
datetime module.
"""
from dateutil.tz import tzfile
from tarfile import TarFile
import os
__author__ = "Gustavo Niemeyer <gustavo@niemeyer.net>"
__license__ = "PSF License"
__all__ = ["setcachesize", "gettz", "rebuild"]
CACHE = []
CACHESIZE = 10
class tzfile(tzfile):
def __reduce__(self):
return (gettz, (self._filename,))
def getzoneinfofile():
filenames = os.listdir(os.path.join(os.path.dirname(__file__)))
filenames.sort()
filenames.reverse()
for entry in filenames:
if entry.startswith("zoneinfo") and ".tar." in entry:
return os.path.join(os.path.dirname(__file__), entry)
return None
ZONEINFOFILE = getzoneinfofile()
del getzoneinfofile
def setcachesize(size):
global CACHESIZE, CACHE
CACHESIZE = size
del CACHE[size:]
def gettz(name):
tzinfo = None
if ZONEINFOFILE:
for cachedname, tzinfo in CACHE:
if cachedname == name:
break
else:
tf = TarFile.open(ZONEINFOFILE)
try:
zonefile = tf.extractfile(name)
except KeyError:
tzinfo = None
else:
tzinfo = tzfile(zonefile)
tf.close()
CACHE.insert(0, (name, tzinfo))
del CACHE[CACHESIZE:]
return tzinfo
def rebuild(filename, tag=None, format="gz"):
import tempfile, shutil
tmpdir = tempfile.mkdtemp()
zonedir = os.path.join(tmpdir, "zoneinfo")
moduledir = os.path.dirname(__file__)
    if tag: tag = "-"+tag
    else: tag = ""
targetname = "zoneinfo%s.tar.%s" % (tag, format)
try:
tf = TarFile.open(filename)
for name in tf.getnames():
if not (name.endswith(".sh") or
name.endswith(".tab") or
name == "leapseconds"):
tf.extract(name, tmpdir)
filepath = os.path.join(tmpdir, name)
os.system("zic -d %s %s" % (zonedir, filepath))
tf.close()
target = os.path.join(moduledir, targetname)
for entry in os.listdir(moduledir):
if entry.startswith("zoneinfo") and ".tar." in entry:
os.unlink(os.path.join(moduledir, entry))
tf = TarFile.open(target, "w:%s" % format)
for entry in os.listdir(zonedir):
entrypath = os.path.join(zonedir, entry)
tf.add(entrypath, entry)
tf.close()
finally:
shutil.rmtree(tmpdir)
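# Hedged usage sketch (not part of the original module): this gettz() serves
# zones from the bundled zoneinfo tarball, independently of the host system.
if __name__ == "__main__":
    tz = gettz("Europe/Lisbon")
    print tz  # None if no zoneinfo*.tar.* file ships next to this module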
| Python |
# This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import _winreg
__author__ = "Jeffrey Harris & Gustavo Niemeyer <gustavo@niemeyer.net>"
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
global TZKEYNAME
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
try:
_winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
_settzkeyname()
class tzwinbase(datetime.tzinfo):
"""tzinfo class based on win32's timezones available in the registry."""
def utcoffset(self, dt):
if self._isdst(dt):
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
def dst(self, dt):
if self._isdst(dt):
minutes = self._dstoffset - self._stdoffset
return datetime.timedelta(minutes=minutes)
else:
return datetime.timedelta(0)
def tzname(self, dt):
if self._isdst(dt):
return self._dstname
else:
return self._stdname
def list():
"""Return a list of all time zones known to the system."""
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
tzkey = _winreg.OpenKey(handle, TZKEYNAME)
result = [_winreg.EnumKey(tzkey, i)
for i in range(_winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
list = staticmethod(list)
def display(self):
return self._display
def _isdst(self, dt):
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
if dston < dstoff:
return dston <= dt.replace(tzinfo=None) < dstoff
else:
return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
        tzkey = _winreg.OpenKey(handle, "%s\\%s" % (TZKEYNAME, name))
keydict = valuestodict(tzkey)
tzkey.Close()
handle.Close()
self._stdname = keydict["Std"].encode("iso-8859-1")
self._dstname = keydict["Dlt"].encode("iso-8859-1")
self._display = keydict["Display"]
# See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
def __init__(self):
handle = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE)
tzlocalkey = _winreg.OpenKey(handle, TZLOCALKEYNAME)
keydict = valuestodict(tzlocalkey)
tzlocalkey.Close()
self._stdname = keydict["StandardName"].encode("iso-8859-1")
self._dstname = keydict["DaylightName"].encode("iso-8859-1")
try:
            tzkey = _winreg.OpenKey(handle, "%s\\%s" % (TZKEYNAME, self._stdname))
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
tzkey.Close()
except OSError:
self._display = None
handle.Close()
self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
self._dstoffset = self._stdoffset-keydict["DaylightBias"]
# See http://ww_winreg.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:6]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:6]
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek 5 means last instance"""
first = datetime.datetime(year, month, 1, hour, minute)
weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
    # Walk back from the highest candidate occurrence so that whichweek == 5
    # resolves to the last instance actually present in the month.
    for n in xrange(whichweek):
        dt = weekdayone+(whichweek-n-1)*ONEWEEK
if dt.month == month:
return dt
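# Example (sketch): with the occurrence counting above,
# picknthweekday(2010, 3, 0, 2, 0, 2) resolves to the second Sunday of
# March 2010 at 02:00, i.e. datetime.datetime(2010, 3, 14, 2, 0).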
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dict = {}
size = _winreg.QueryInfoKey(key)[1]
for i in range(size):
data = _winreg.EnumValue(key, i)
dict[data[0]] = data[1]
return dict
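# Illustrative sketch (not part of the original module); only meaningful on
# Windows, since _winreg is imported unconditionally above.
if __name__ == "__main__":
    print "%d zones in the registry" % len(tzwin.list())
    local = tzwinlocal()
    print local.tzname(datetime.datetime.now())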
| Python |
'''
This module contains classes for managing paging.
'''
import google.appengine.ext.db as db
import google.appengine.api.memcache as memcache
import logging
import pickle
namespace = 'he3'
class PagedQuery(object):
'''
This class is a facade to a db.Query object that offers additional
functionality to enable paging operations on query datasets. This class
uses the cursor functionality introduced recently into Google App Engine
to provide a full paging abstraction.
Note that support for all Query and GqlQuery methods is provided, although
executing a method not supported by GqlQuery will raise an error on
PagedQuery objects instantiated with a GqlQuery object.
Of course, the cursor() and with_cursor() methods should only be used
rarely since most uses of cursors duplicates the functionality (and defeats
the purpose) of this facade. The cursor methods are provided for
completeness.
USAGE:
Instantiate a PagedQuery with an existing db.Query or db.GqlQuery and a
page size:
myPagedQuery = PagedQuery(myEntity.all(), 10)
PagedQuery supports the filter and ordering methods of db.Query if you
instantiate the object with a db.Query (not db.GqlQuery). You can apply
    these methods before or after instantiating the PagedQuery. E.g.
myQuery = myEntity.all().filter('myPropName >', my_prop_value)
myPagedQuery = PagedQuery(myQuery, 10)
myPagedQuery.order('-myPropName')
This is fine.
To fetch the first page of the results:
myResults = myPagedQuery.fetch_page()
To fetch any particular page, use a page number:
myResults = myPagedQuery.fetch_page(3)
On a subsequent request, recreate the same query and PagedQuery object, and
request another page:
myResults = myPagedQuery.fetch_page(4)
To determine whether a particular page exists:
nextPageExists = myPagedQuery.has_page(5)
To get a count of the number of pages available with the dataset:
num_pages = myPagedQuery.page_count()
Some necessary implementation details:
Cursor Limits: This class works using the Cursor features introduced in the
Google App Engine SDK 1.3.1. All cursor restrictions apply. In particular
, pages will not re-order if changes are made to the query results prior
to current page. Some query features (IN and != filters) will not work and
sorting on multi-value fields will be unreliable.
See http://code.google.com/appengine/docs/python/datastore/queriesandindexes.html#Query_Cursors
for more information
    Efficient Use: The most efficient way to use PagedQuery is to retrieve
one successive page after another. Access to any previous page is just as
efficient. Avoid calling the page_count() method or requesting pages more
than one in advance of the highest page yet requested.
Memcache: Internally PagedQuery persists information to memcache. The
information cached includes a query identifier and a hash of pages and
cursors. Due to the unreliable nature of memcache, persistence can not be
ensured. PagedQuery will handle memcache misses, at a reduced
performance profile.
Data Updates: Because of the cached nature of the internal cursors, if you
    need to ensure the most up-to-date data is retrieved, clear all cached data:
myPagedQuery.clear()
myPagedQuery.fetch_page() (which returns the first page) also clears the
cached data.
Mutating the query in any way (using .filter(), order() or
    similar) also clears the cache.
Note that when retrieving a page for a second time, the internal cursors
are checked for changes. If changes exist, the cursors corresponding to all
subsequent pages are cleared from the cache.
'''
def __init__(self, query, page_size):
'''
Constructor for a paged query.
@param query: a google.appengine.ext.db.query object
@param page_size: a positive non-zero integer defining the size of
each page.
@raise TypeError: raised if query is not an instance of db.Query or
db.GqlQuery
'''
self._query = query
self._page_size = page_size
self._page_cursors = [None]
self._page_count = None
self._id = None
self._last_persisted_as = None
self._num_offset_queries = 0
self._num_cursor_queries = 0
self._num_page1_queries = 0
self._num_count_calls = 0
self._num_persist = 0
self._num_restore = 0
#find out if we are dealing with another facade object
if query.__dict__.has_key('_query'): query_to_check = query._query
else: query_to_check = query
if isinstance(query_to_check, db.Query): self._query_type = 'Query'
elif isinstance(query_to_check, db.GqlQuery): self._query_type = 'GqlQuery'
else: raise TypeError('Query type not supported: '\
+ type(query).__name__)
self._check_page_size(page_size)
def fetch_page(self, page_number=1, clear=False):
'''Fetches a single page of results from the datastore. A page in the
datastore starts at a specific position equal to
(page_size x page_number) - page_size (zero-based). If the page does
not exist (not enough results to support it), an empty list is returned
@param page_number: The number of the page to return. If None or no
parameter is specified for page_number, page 1 is returned and cache
cleared.
@return: A list of all entities on the specified page.
'''
if clear:
self.clear()
else:
self.id #force id to be assigned now
self._restore_if_required()
self._check_page_number(page_number)
if self._has_cursor_for_page(page_number):
offset = 0
self._query.with_cursor(self._get_cursor_for_page(page_number))
self._num_cursor_queries += 1
elif page_number > 1:
#if we can not use a cursor, we need to use the offset method
#the offset method errors if it is out of range. Therefore:
#if page_number > 1 and page_number > self.page_count(): return []
self._query.with_cursor(None)
offset = (self.page_size * (page_number -1))
#record that we did an offset query. Useful for testing
self._num_offset_queries += 1
else:
self._num_page1_queries += 1
self._query.with_cursor(None)
            offset = 0
results = self.fetch(limit=self.page_size, offset=offset)
self._update_cursors_with_results(page_number, results)
self._query.with_cursor(None)
self._persist_if_required()
return results
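    # Sketch of a typical request flow (names and values are illustrative):
    #   pq = PagedQuery(MyModel.all().order('-created'), 10)
    #   page3 = pq.fetch_page(3)  # no cached cursor yet: offset query
    #   page4 = pq.fetch_page(4)  # cursor was cached while fetching page 3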
def clear(self):
'''Clears the cached data for the current query'''
memcache.Client().delete(self._get_memcache_key())
self._page_cursors = [None]
self._page_count = None
self._last_persisted_as = None
self._id = None
def page_count(self):
'''Returns the number of pages that can be returned by the query
@return: an integer value of 0 or higher indicating the total number
of pages available, up to limit
        @warning: The maximum number of pages returned is equal to 1000/page_size
        or the maximum number of pages returned by fetch_page(), whichever is greater.
'''
if not self._page_count:
result_count = self._query.count()
(full_pages, remainder) = divmod(result_count, self.page_size)
self._page_count = full_pages if remainder == 0 else full_pages + 1
#Record we did a query.count() call
self._num_count_calls += 1
return self._page_count
def has_page(self, page_number):
'''Returns True if the requested page exists for the current
        PagedQuery object. Note that calling this method for a page at or
        below one that has already been fetched is cheaper performance-
        wise than calling it for a page not yet visited. Of course, if another
action causes a full count() of the query then this action is cheap
regardless.
@param page_number: Page number to test the existence of
        @return: True or False depending on whether the page exists, i.e.
has_page(n) == len(fetch_page(n)) > 0'''
        #we might be able to avoid an unnecessary query.count() if we can see
#a cursor already exists for page-number or a higher page.
return page_number > 0 and (len(self._page_cursors) > page_number or page_number <= self.page_count())
def fetch(self, limit, offset=0):
''' executes query against datastore as per db.Query.fetch()
@param limit: Maximum amount of results to retrieve as per
db.Query.fetch()
@param offset: Number of results to skip prior to returning resultset.
As per db.Query.fetch().
@return: A list of entity results, as per db.Query.fetch()
NOTE: this method should match the corresponding signature of
db.Query.fetch() precisely.
@see: http://code.google.com/appengine/docs/python/datastore/queryclass.html
'''
return self._query.fetch(limit,offset)
def filter(self, property_operator, value):
'''Adds a property condition filter to the query. Only entities with
properties that meet all of the conditions will be returned by the
query. This method should behave identically to the db.Query.filter()
method. Using this method also clears any caching of the object.
@attention: This method is only available for Queries used
        to initialise the PagedQuery of type db.Query
@see: http://code.google.com/appengine/docs/python/datastore/queryclass.html
@param property_operator: A string containing the property name, and an
optional comparison operator
@param value: The value to use in the comparison on the right-hand side
of the expression
@return: The query with filter added
@raise TypeError: raised if the query not the correct type
'''
self._check_query_type_is('Query')
self.clear()
self._query = self._query.filter(property_operator, value)
return self
def order(self, property):
'''Adds an ordering for the results. Results are ordered starting with
the first order added. This method should behave identically to the
db.Query.order() method. Using this method also clears any caching of
the object.
@attention: This method is only available for Queries used
        to initialise the PagedQuery of type db.Query
@see: http://code.google.com/appengine/docs/python/datastore/queryclass.html
@param property: A string, the name of the property to order
@return: The query with order added
@raise TypeError: raised if the query not the correct type
'''
self._check_query_type_is('Query')
self.clear()
self._query.order(property)
return self
def ancestor(self, ancestor):
'''Adds an ancestor condition filter to the query. Only entities with
the given entity as an ancestor (anywhere in its path) will be returned
by the query. This method should behave identically to the
db.Query.ancestor() method. Using this method also clears any caching of
the object.
@attention: This method is only available for Queries used
        to initialise the PagedQuery of type db.Query
@see: http://code.google.com/appengine/docs/python/datastore/queryclass.html
@param ancestor: A Model instance or Key instance representing the
ancestor.
@return: Itself after ancestor condition has been added
@raise TypeError: raised if the query not the correct type
'''
self._check_query_type_is('Query')
self.clear()
self._query.ancestor(ancestor)
return self
def count(self, limit=1000):
'''Returns the number of results this query fetches. This method should
behave identically to the method of the same name of db.Query and
db.GqlQuery
@see: http://code.google.com/appengine/docs/python/datastore/queryclass.html
@param limit: The maximum number of results to count.
@return: Returns the number of result this query fetches
'''
return self._query.count(limit)
def _get_page_size(self):
'''Returns the page size set during instantiation or using
set_page_size()
@return: An integer greater than zero indicating the number of results
to be returned on each page.
'''
return self._page_size
def _set_page_size(self, new_page_size):
'''Sets the page size of the PagedQuery. If the new page_size differs
from the existing page size, the cache is cleared.
@param new_page_size: an integer greater than zero indicating the number
of results to be returned on each page.
@return: void
'''
self._check_page_size(new_page_size)
if new_page_size != self._page_size:
self.clear()
self._page_size = new_page_size
def _has_cursor_for_page(self, page_number):
'''Returns True if a page_cursor is available for a specific page, False
otherwise
@param page_number: The non-zero positive integer page number for which
to check the cursor for
@return: True if a cursor exists for the page number, or false if not
'''
return (len(self._page_cursors) >= page_number
and self._page_cursors[page_number-1])
def _set_cursor_for_page(self, page_number, cursor):
'''Sets a cursor for a specific page.
@param page_number: The non-zero positive integer page number to set the
the cursor for
@param cursor: the string cursor generated by query.cursor() to set for
the supplied page number
@return: void
'''
#append None values to page_cursors if required
while len(self._page_cursors) < page_number:
self._page_cursors.append(None)
self._page_cursors[page_number-1] = cursor
def _get_cursor_for_page(self, page_number):
'''Returns the cursor for a page. The page must be known to exist prior
to calling; if it does not, an exception will be raised.
@param page_number: The non-zero positive integer page number to
to return the cursor for
@return: The cursor for the page number specified
@raise IndexError: If the page number does not exist
'''
return self._page_cursors[page_number-1]
def _get_query_id(self):
'''Returns the ID of the query. This id is unique to the query. Whenever
a query is rebuilt the same way (ie semantically identical) the ID will
be the same
@return: a string ID
@todo: the initial version cached the id value, but this caused
unexplained test-case failures. Cause unknown
'''
if not self._id:
self._id = self._generate_query_id()
return self._id
def _generate_query_id(self):
'''Generates a query ID for the PagedQuery from scratch
@return: a string ID
'''
return str(hash(pickle.dumps(self._query,2)))
def _check_query_type_is(self, required_query_type):
'''This is a helper method to assert that the query the PagedQuery was
initialised with is of the correct type.
@param required_query_type: Value of self._query_type expected (
currently only 'Query' or 'GqlQuery')
@return: nothing
@raise TypeError: raised if the query not the correct type
'''
if self._query_type != required_query_type:
raise TypeError('Operation not allowed for query type ('
+ type(self._query).__name__ + ')')
def _check_page_number(self, page_number):
'''This is a helper method to assert that the page_number provided is
of the correct type and value
@param page_number: page_number value to check
@return: nothing
@raise: TypeError if the page_number is not a positive integer over 0
'''
if type(page_number) != int or page_number < 1:
raise TypeError(
'A page number must be a positive integer greater than 0')
def _check_page_size(self, page_size):
'''This is a helper method to check the type and value of a page_size
parameter to ensure it is valid. If it is not valid a TypeError is
thrown
@param page_size: page_size value to check
@return: nothing
@raise: TypeError if the page_size is not a positive integer over 0
'''
if type(page_size) != int or page_size < 1:
raise TypeError(
'A page size must be a positive integer greater than 0')
def _update_cursors_with_results(self, page_number, results):
'''Updates the cached page cursors with information inferred from the
page_number and the contents of that page number.
@param page_number: non-zero positive integer page number that generated
the results.
@param results: List of entities returned by a Query or GQL query for
a specific page.
@return: Nothing
'''
if len(results) == self.page_size:
#persist the cursor (but only if a full page of results has been
#returned)
self._set_cursor_for_page(
page_number = page_number + 1,
cursor = self._query.cursor())
elif len(results) == 0:
#remove the cursor for the current page
self._set_cursor_for_page(
page_number = page_number,
cursor = None)
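#illustration of the two branches above (assuming page_size == 10): a full
#page 3 stores the query cursor as the start cursor for page 4, while an
#empty page 3 clears any cursor previously recorded for page 3 itself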
def _persist_if_required(self):
'''Persists the persistable cached elements of the object for retrieval
in a separate request only if conditions are appropriate.
@return: nothing
'''
persisted_form = self._get_persisted_form()
if (not self._last_persisted_as)\
or self._last_persisted_as != persisted_form:
self._persist(persisted_form)
self._last_persisted_as = persisted_form
def _persist(self, persisted_form):
'''Persists the provided persisted form to the memcache persistence layer
@param persisted_form: object to persist
@return: nothing
'''
memcache.Client().set(self._get_memcache_key(), persisted_form)
self._num_persist += 1
def _restore_if_required(self):
'''Restores the persisted version of the PagedQuery if required.
'''
if not self._last_persisted_as:
self._last_persisted_as = self._restore()
def _restore(self):
'''Restores any persisted version of the query into the correct values
within this object and returns the persisted form
@return: The persisted form
'''
persisted_form = memcache.Client().get(self._get_memcache_key())
if persisted_form:
self._page_cursors = [s for s in persisted_form['page_cursors']]
self._page_count = persisted_form['page_count']
self._num_restore += 1
return persisted_form
def _get_memcache_key(self):
'''Returns the correct memcache key used to identify this query in
the memcache system
@return: A string memcache key to use
'''
return namespace + '_PagedQuery-persistence_' + str(self.id)
def _get_persisted_form(self):
'''Returns the form in which the PagedQuery information is persisted
@return: a dict holding the page cursors and the page count
'''
return {
'page_cursors':[s for s in self._page_cursors],
'page_count':self._page_count
}
page_size = property(fget=_get_page_size, fset=_set_page_size,
doc='Configured page size of the PagedQuery')
id = property(fget=_get_query_id, doc='unique id of this query')
class PageLinks:
'''This is an object representing a list of hyperlinks to a set of
pages.
'''
def __init__(self, page, page_count, url_root, page_range=10):
'''Initialises the PageLinks object with the information required
to generate the page link set
@param page: The current page
@param page_count: The total number of pages
@param url_root: The start of the URL assigned to each page.
@param page_range: total number of page links to show, excluding the
previous, next and current page links; odd values are rounded down.
Must be a positive, non-zero integer.
'''
self.page = page
self.page_count = page_count
self.url_root = url_root
self.page_range = page_range
def get_links(self):
'''uses the initialisation information to return a list of links
@return: A list of text and url pairs
'''
#find the number of items to show either side (if possible)
i_side_range = self.page_range//2
#create the appropriate page range to show
if self.page < i_side_range + 1:
pages = range(1,
self.page_count + 1 if self.page_count < (2*i_side_range) else (2*i_side_range)+1)
else:
pages = range(self.page - i_side_range
, self.page_count + 1 if self.page_count < (self.page + i_side_range)
else (self.page + i_side_range + 1))
#use page range to construct list
page_links =\
[(str(p),'%s%d/' % (self.url_root, p)) for p in pages]
#add a prev link if required
if self.page > 1:
prev_link = ('Prev', '%s%d/' %
(self.url_root, self.page - 1) )
page_links.insert(0,prev_link)
#add a next link if required
if self.page < self.page_count:
next_link = ('Next', '%s%d/' %
(self.url_root, self.page + 1) )
page_links.append(next_link)
return page_links
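# ---------------------------------------------------------------------------
# Illustrative sketch (added for documentation, not part of the original
# module): the usual way to combine PagedQuery and PageLinks in a handler.
# The paged_query argument is assumed to be an already-constructed PagedQuery.
def _example_render_page(paged_query, page_number, url_root):
    '''Returns (entities for the page, pagination links for that page).'''
    # fetch the requested page using any cached cursors
    results = paged_query.fetch_page(page_number)
    # build the Prev/1/2/.../Next link list for a template to render
    links = PageLinks(page=page_number,
                      page_count=paged_query.page_count(),
                      url_root=url_root).get_links()
    return results, links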
| Python |
#!/usr/bin/env python
# encoding: utf-8
"""
datastore_cache.py
Created by Alkis Evlogimenos on 2009-04-19.
Modified by keakon on 2010-10-31.
"""
import itertools
#import logging
import threading
from google.appengine.api import memcache
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
"""Provides a shim that caches datastore Get calls.
Example code:
import datastore_cache
datastore_cache.DatastoreCachingShim.Install()
# ...
def main(args):
util.run_wsgi_app(application)
"""
_QUERY_CACHE_TIME = 600
class APIProxyShim(object):
"""A generic shim class, with methods to install/uninstall it.
Subclasses of this class can be used to replace the real stub for a service,
intercepting and possibly passing on calls to the original stub.
"""
SERVICE_NAME = None # To be overridden by subclasses
_instance = None
def __init__(self, wrapped_stub):
"""Constructor. Internal use only - see Install()."""
self._wrapped_stub = wrapped_stub
def CreateRPC(self):
return apiproxy_rpc.RPC(stub=self)
def CallWrappedStub(self, call, request, response):
"""Allows subclasses to call the wrapped stub."""
self._wrapped_stub.MakeSyncCall(self.SERVICE_NAME, call, request, response)
def MakeSyncCall(self, service, call, request, response):
assert service == self.SERVICE_NAME, (
'Got service name "%s", expected "%s"' % (service, self.SERVICE_NAME))
messages = []
assert request.IsInitialized(messages), messages
method = getattr(self, '_Dynamic_' + call, None)
if method:
method(request, response)
else:
self.CallWrappedStub(call, request, response)
assert response.IsInitialized(messages), messages
def __getattr__(self, name):
"""Pass-through to the wrapped stub."""
return getattr(self._wrapped_stub, name)
@classmethod
def Install(cls):
"""Installs the shim. Only needs to be run once at import time.
Note that this accesses internal members of APIProxyStubMap, so may break
in future.
"""
if not cls._instance:
wrapped_stub = apiproxy_stub_map.apiproxy.GetStub(cls.SERVICE_NAME)
assert wrapped_stub, "No service '%s' found to wrap." % cls.SERVICE_NAME
cls._instance = cls(wrapped_stub)
stub_dict = apiproxy_stub_map.apiproxy._APIProxyStubMap__stub_map
stub_dict[cls.SERVICE_NAME] = cls._instance
@classmethod
def Uninstall(cls):
"""Uninstalls the shim.
Note that there's no need to uninstall a shim after each request. You can
install it once at import time and leave it there between requests.
"""
if cls._instance:
stub_dict = apiproxy_stub_map.apiproxy._APIProxyStubMap__stub_map
stub_dict[cls.SERVICE_NAME] = cls._instance._wrapped_stub
cls._instance = None
class DatastoreCachingShim(APIProxyShim):
SERVICE_NAME = 'datastore_v3'
def __init__(self, default_stub):
super(DatastoreCachingShim, self).__init__(default_stub)
self.local = threading.local()
self.local.to_delete = dict()
def _Dynamic_Get(self, request, response):
"""Intercepts get requests and returns them from cache if available."""
#logging.info("Tx: %s, Keys: %s", request.has_transaction(), [str(x) for x in request.key_list()])
if request.has_transaction():
self.CallWrappedStub('Get', request, response)
return
new_request = datastore_pb.GetRequest()
new_response = datastore_pb.GetResponse()
encoded_keys = [k.Encode() for k in request.key_list()]
cached = memcache.get_multi(encoded_keys)
for key, encoded_key in itertools.izip(request.key_list(), encoded_keys):
if encoded_key not in cached:
new_request.add_key().CopyFrom(key)
if new_request.key_size() > 0:
self.CallWrappedStub('Get', new_request, new_response)
entity_iter = iter(new_response.entity_list())
to_put = dict()
for encoded_key in encoded_keys:
entity = cached.get(encoded_key, None)
if entity:
response.add_entity().mutable_entity().CopyFrom(entity)
else:
entity = entity_iter.next()
if entity.entity().IsInitialized():
to_put[encoded_key] = entity.entity()
response.add_entity().CopyFrom(entity)
if to_put:
memcache.set_multi(to_put)
def _Dynamic_Put(self, request, response):
"""Intercepts puts and adds them to the cache."""
self.CallWrappedStub('Put', request, response)
# If this is in a transaction we mark these entries for deletion
# when and if the transaction commits.
if request.has_transaction():
to_delete = [k.Encode() for k in response.key_list()]
self.local.to_delete[request.transaction().handle()].extend(to_delete)
return
to_put = dict()
for e, k in itertools.izip(request.entity_list(), response.key_list()):
e.key().CopyFrom(k)
to_put[k.Encode()] = e
if to_put:
memcache.set_multi(to_put)
def _Dynamic_Delete(self, request, response):
"""Intercepts deletes and deletes entries from the cache."""
self.CallWrappedStub('Delete', request, response)
to_delete = [k.Encode() for k in request.key_list()]
# If this is in a transaction we mark these entries for deletion
# when and if the transaction commits.
if request.has_transaction():
self.local.to_delete[request.transaction().handle()].extend(to_delete)
return
memcache.delete_multi(to_delete)
def _Dynamic_RunQuery(self, query, query_result):
"""Intercepts query results and caches the returned entities."""
if query.has_transaction():
self.CallWrappedStub('RunQuery', query, query_result)
return
key = 'RunQuery:' + query.Encode()
# For small applications, using hash() is also suitable and more efficient:
###############################################################
# key = 'RunQuery:' + str(hash(query.Encode()))
###############################################################
# if you are worried about hash collisions, you can use md5, sha or any other hash algorithm instead:
###############################################################
# import hashlib
# key = 'RunQuery:' + hashlib.md5(query.Encode()).hexdigest()
###############################################################
results = memcache.get(key)
if results is None:
self.CallWrappedStub('RunQuery', query, query_result)
memcache.set(key, query_result, _QUERY_CACHE_TIME)
else:
query_result.MergeFrom(results)
def _Dynamic_Next(self, request, response):
"""Intercepts the next batch of results and caches the returned entities."""
self.CallWrappedStub('Next', request, response)
to_put = dict([(e.key().Encode(), e) for e in response.result_list()])
memcache.set_multi(to_put)
def _Dynamic_BeginTransaction(self, request, transaction):
"""Intercepts the beginning of transactions and creates thread local storage for deletions"""
self.CallWrappedStub('BeginTransaction', request, transaction)
# threading.local attributes only exist in the thread that assigned them,
# so make sure this thread has its own to_delete mapping before using it
if not hasattr(self.local, 'to_delete'):
self.local.to_delete = dict()
self.local.to_delete[transaction.handle()] = []
def _Dynamic_Commit(self, transaction, transaction_response):
"""Intercepts the commit of transactions and deletes all entities that were modified/delete by this transaction"""
# We delete from cache before we commit otherwise we have a race condition.
to_delete = self.local.to_delete[transaction.handle()]
if to_delete:
memcache.delete_multi(to_delete)
del self.local.to_delete[transaction.handle()]
self.CallWrappedStub('Commit', transaction, transaction_response)
def _Dynamic_Rollback(self, transaction, transaction_response):
"""Intercepts the rollback of transactions and clears the thread local storage for them"""
del self.local.to_delete[transaction.handle()]
self.CallWrappedStub('Rollback', transaction, transaction_response)
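# Illustrative sketch (assumes a configured App Engine runtime; 'Greeting'
# is a hypothetical db.Model, not part of this module):
#
#   DatastoreCachingShim.Install()       # once, at import time
#   key = Greeting(content='hi').put()   # write-through: entity also cached
#   greeting = Greeting.get(key)         # now served from memcache, no RPC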
| Python |
import os
from models import *
from utilities import *
from google.appengine.api import users
import UserDict
from Cookie import BaseCookie
from google.appengine.ext import webapp
from django.utils import translation
import hashlib
class LoginControl():
def Login(self, user, password, externalself, timealive):
logged = False
queryuser = CustomUser.all()
queryuser.filter(' username = ', user)
passwordencryptor = hashlib.md5()
passwordencryptor.update(password)
encpassword = passwordencryptor.hexdigest()
queryuser.filter(' password = ', encpassword)
dbusers = queryuser.fetch(1)
if len(dbusers) > 0:
logged = True
dbuser = dbusers[0]
dbuser.put()
sessionid = str(dbuser.key())
cookies = Cookies(externalself, max_age = timealive)
cookies['sessionid'] = str(dbuser.key())
if timealive > 300:
cookies['remember'] = '1'
else:
# fall back to treating the supplied identifier as an email address
queryuser = CustomUser.all()
queryuser.filter(' email = ', user)
passwordencryptor = hashlib.md5()
passwordencryptor.update(password)
encpassword = passwordencryptor.hexdigest()
cookies = Cookies(externalself, max_age = timealive)
queryuser.filter(' password = ', encpassword)
dbusers = queryuser.fetch(1)
if len(dbusers) > 0:
logged = True
dbuser = dbusers[0]
cookies['sessionid'] = str(dbuser.key())
if timealive > 300:
cookies['remember'] = '1'
else:
logged = False
if logged:
return dbuser
else:
return 0
def LoginAuxiliar(self, useremail, tmppassword, externalself, timealive):
expiredtimedelta = timedelta(hours = 24)
expireddate = datetime.today() - expiredtimedelta
querycustomusers = CustomUser.all()
querycustomusers.filter(' tmppass = ' , tmppassword)
querycustomusers.filter(' email = ' , useremail)
querycustomusers.filter(' expire_date > ' , expireddate)
customusers = querycustomusers.fetch(1)
if len(customusers) > 0:
customuser = customusers[0]
customuser.tmppass = str(random.randint(0,9999999))
customuser.put()
sessionid = str(customuser.key())
cookies = Cookies(externalself, max_age = timealive)
cookies['sessionid'] = sessionid
return customuser
else:
return 0
def Logout(self, externalself):
cookies = Cookies(externalself, max_age = -100)
if cookies.has_key('sessionid'):
sessionid = cookies['sessionid']
del cookies['sessionid']
cookies['sessionid'] = -1
if cookies.has_key('remember'):
del cookies['remember']
cookies['remember'] = -1
externalself.redirect("http://wolpyweb.appspot.com/login")
return 0
def CheckLogin(self,externalself):
'''Returns the logged-in CustomUser for this request, or 0 if the
session cookie is missing or stale. Note that only sessions carrying
the 'remember' cookie are ever looked up in the datastore.'''
cookies = Cookies(externalself, max_age = 300)
dbuser = 0
if cookies.has_key('remember'):
if cookies.has_key('sessionid'):
# the session id is the string form of the user's datastore key
sessionid = cookies['sessionid']
dbuser = CustomUser.get(sessionid)
if dbuser:
return dbuser
else:
sessionid = '-1'
logged = False
else:
sessionid = '-1'
logged = False
if not cookies.has_key('sessionid'):
sessionid = '-1'
logged = False
else:
sessionid = str(cookies['sessionid'])
logged = False
if sessionid != '-1':
if dbuser:
return dbuser
else:
logged = False
if not logged:
return 0
def RegenerateTmppass(self,dbuser):
dbuser.tmppass = str(random.randint(0,9999999))
dbuser.put()
| Python |
def list_converter(Tags):
'''Converts a string of the form '<tag1><tag2>...' into a list of tag
names. Returns an empty list for empty or non-string input.'''
if isinstance(Tags, basestring) and len(Tags) > 0:
# strip the leading '<' and the trailing '>'
Tags = Tags[1:]
Tags = Tags[:-1]
tagsList = Tags.split('><')
return tagsList
else:
tagsList = []
return tagsList
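# Examples (assuming the '<tag1><tag2>' input format described above):
#   list_converter('<python><gae><webapp>') == ['python', 'gae', 'webapp']
#   list_converter('') == []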
| Python |
#!/usr/bin/env python
from distutils.core import setup
setup(name='textile',
version='2.1.2',
description='This is Textile. A Humane Web Text Generator.',
author='Jason Samsa',
author_email='jsamsa@gmail.com',
url='http://loopcore.com/python-textile/',
py_modules=['textile'],
platforms = ['any'],
license = 'BSD',
long_description = """Textile is an XHTML generator using a simple markup developed by Dean Allen."""
)
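# Typical invocation (standard distutils workflow, not specific to this
# package): python setup.py install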
| Python |
#!/usr/bin/env python
"""
PyTextile
A Humane Web Text Generator
"""
__version__ = '2.1.2'
__date__ = '2008/11/30'
__copyright__ = """
Copyright (c) 2008, Jason Samsa, http://jsamsa.com/
Copyright (c) 2004, Roberto A. F. De Almeida, http://dealmeida.net/
Copyright (c) 2003, Mark Pilgrim, http://diveintomark.org/
Original PHP Version:
Copyright (c) 2003-2004, Dean Allen <dean@textism.com>
All rights reserved.
Thanks to Carlo Zottmann <carlo@g-blog.net> for refactoring
Textile's procedural code into a class framework
Additions and fixes Copyright (c) 2006 Alex Shiels http://thresholdstate.com/
"""
__license__ = """
L I C E N S E
=============
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Textile nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import uuid
from urlparse import urlparse
import sgmllib
def _normalize_newlines(string):
return re.sub(r'(\r\n|\r|\n)', '\n', string)
# PyTextile can optionally sanitize the generated XHTML,
# which is good for weblog comments. This code is from
# Mark Pilgrim's feedparser.
class _BaseHTMLProcessor(sgmllib.SGMLParser):
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self):
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def normalize_attrs(self, attrs):
# utility method to be called by descendants
attrs = [(k.lower(), sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v).strip()) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class="screen">, tag="pre", attrs=[("class", "screen")]
strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs])
if tag in self.elements_no_end_tag:
self.pieces.append("<%(tag)s%(strattrs)s />" % locals())
else:
self.pieces.append("<%(tag)s%(strattrs)s>" % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be "pre"
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
# called for each character reference, e.g. for "&#160;", ref will be "160"
# Reconstruct the original character reference.
self.pieces.append("&#%(ref)s;" % locals())
def handle_entityref(self, ref):
# called for each entity reference, e.g. for "&copy;", ref will be "copy"
# Reconstruct the original entity reference.
self.pieces.append("&%(ref)s;" % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append("<!--%(text)s-->" % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append("<?%(text)s>" % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append("<!%(text)s>" % locals())
def output(self):
"""Return processed HTML as a single string"""
return "".join(self.pieces)
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
'usemap', 'valign', 'value', 'vspace', 'width']
unacceptable_elements_with_end_tag = ['script', 'applet']
# This is for MathML.
mathml_elements = ['math', 'mi', 'mn', 'mo', 'mrow', 'msup']
mathml_attributes = ['mode', 'xmlns']
acceptable_elements = acceptable_elements + mathml_elements
acceptable_attributes = acceptable_attributes + mathml_attributes
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
def unknown_starttag(self, tag, attrs):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
return
attrs = self.normalize_attrs(attrs)
attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
# PyTextile can optionally validate the generated
# XHTML code using either mxTidy or uTidyLib.
try:
# This is mxTidy.
from mx.Tidy import Tidy
def _tidy1(text):
"""mxTidy's XHTML validator.
This function is a wrapper to mxTidy's validator.
"""
nerrors, nwarnings, text, errortext = Tidy.tidy(text, output_xhtml=1, numeric_entities=1, wrap=0)
return _in_tag(text, 'body')
_tidy = _tidy1
except ImportError:
try:
# This is uTidyLib.
import tidy
def _tidy2(text):
"""uTidyLib's XHTML validator.
This function is a wrapper to uTidyLib's validator.
"""
text = tidy.parseString(text, output_xhtml=1, add_xml_decl=0, indent=0, tidy_mark=0)
return _in_tag(str(text), 'body')
_tidy = _tidy2
except ImportError:
_tidy = None
class Textile(object):
hlgn = r'(?:\<(?!>)|(?<!<)\>|\<\>|\=|[()]+(?! ))'
vlgn = r'[\-^~]'
clas = r'(?:\([^)]+\))'
lnge = r'(?:\[[^\]]+\])'
styl = r'(?:\{[^}]+\})'
cspn = r'(?:\\\\\d+)'
rspn = r'(?:\/\d+)'
a = r'(?:%s|%s)*' % (hlgn, vlgn)
s = r'(?:%s|%s)*' % (cspn, rspn)
c = r'(?:%s)*' % '|'.join([clas, styl, lnge, hlgn])
pnct = r'[-!"#$%&()*+,/:;<=>?@\'\[\\\]\.^_`{|}~]'
# urlch = r'[\w"$\-_.+!*\'(),";/?:@=&%#{}|\\^~\[\]`]'
urlch = '[\w"$\-_.+*\'(),";\/?:@=&%#{}|\\^~\[\]`]'
url_schemes = ('http','https','ftp','mailto')
btag = ('bq', 'bc', 'notextile', 'pre', 'h[1-6]', 'fn\d+', 'p')
noimage = False
hu = ''
glyph_defaults = (
('txt_quote_single_open', '‘'),
('txt_quote_single_close', '’'),
('txt_quote_double_open', '“'),
('txt_quote_double_close', '”'),
('txt_apostrophe', '’'),
('txt_prime', '′'),
('txt_prime_double', '″'),
('txt_ellipsis', '…'),
('txt_emdash', '—'),
('txt_endash', '–'),
('txt_dimension', '×'),
('txt_trademark', '™'),
('txt_registered', '®'),
('txt_copyright', '©'),
)
def __init__(self, restricted=False, lite=False):
"""docstring for __init__"""
self.restricted = restricted
self.lite = lite
self.fn = {}
self.urlrefs = {}
self.shelf = {}
self.rel = ''
def textile(self, text, rel=None, encoding='utf8', output='utf8', validate=False, sanitize=False, head_offset='ignored'):
"""
>>> import textile
>>> textile.textile('some textile')
'\\t<p>some textile</p>'
"""
text = _normalize_newlines(text)
if rel:
self.rel = ' rel="%s"' % rel
text = self.getRefs(text)
if not self.lite:
text = self.block(text)
text = self.retrieve(text)
# Convert to desired output.
if isinstance(text, str):
text = unicode(text, encoding)
text = text.encode(output, 'xmlcharrefreplace')
# Sanitize?
if sanitize:
p = _HTMLSanitizer()
p.feed(text)
text = p.output()
# Validate output.
if _tidy and validate:
text = _tidy(text)
return text
def pba(self, input, element=None):
"""
>>> t = Textile()
>>> t.pba(r'\3')
''
>>> t.pba(r'\\3', element='td')
' colspan="3"'
>>> t.pba(r'/4', element='td')
' rowspan="4"'
>>> t.pba(r'\\3/4', element='td')
' colspan="3" rowspan="4"'
>>> t.vAlign('^')
'top'
>>> t.pba('^', element='td')
' style="vertical-align:top;"'
>>> t.pba('{line-height:18px}')
' style="line-height:18px;"'
>>> t.pba('(foo-bar)')
' class="foo-bar"'
>>> t.pba('(#myid)')
' id="myid"'
>>> t.pba('(foo-bar#myid)')
' class="foo-bar" id="myid"'
>>> t.pba('((((')
' style="padding-left:4em;"'
>>> t.pba(')))')
' style="padding-right:3em;"'
>>> t.pba('[fr]')
' lang="fr"'
"""
style = []
aclass = ''
lang = ''
colspan = ''
rowspan = ''
id = ''
atts = ''
if not input: return ''
matched = input
if element == 'td':
m = re.search(r'\\(\d+)', matched)
if m:
colspan = m.group(1)
m = re.search(r'/(\d+)', matched)
if m:
rowspan = m.group(1)
if element == 'td' or element == 'tr':
m = re.search(r'(%s)' % self.vlgn, matched)
if m: style.append("vertical-align:%s;" % self.vAlign(m.group(1)))
m = re.search(r'\{([^}]*)\}', matched)
if m:
style.append(m.group(1).rstrip(';') + ';')
matched = matched.replace(m.group(0), '')
m = re.search(r'\[([^\]]+)\]', matched, re.U)
if m:
lang = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'\(([^()]+)\)', matched, re.U)
if m:
aclass = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'([(]+)', matched)
if m:
style.append("padding-left:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'([)]+)', matched)
if m:
style.append("padding-right:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'(%s)' % self.hlgn, matched)
if m:
style.append("text-align:%s;" % self.hAlign(m.group(1)))
m = re.search(r'^(.*)#(.*)$', aclass)
if m:
id = m.group(2)
aclass = m.group(1)
if self.restricted:
if lang: return ' lang="%s"' % lang
else: return ''
result = []
if style: result.append(' style="%s"' % "".join(style))
if aclass: result.append(' class="%s"' % aclass)
if lang: result.append(' lang="%s"' % lang)
if id: result.append(' id="%s"' % id)
if colspan: result.append(' colspan="%s"' % colspan)
if rowspan: result.append(' rowspan="%s"' % rowspan)
return ''.join(result)
def hasRawText(self, text):
"""
checks whether the text has text not already enclosed by a block tag
>>> t = Textile()
>>> t.hasRawText('<p>foo bar biz baz</p>')
False
>>> t.hasRawText(' why yes, yes it does')
True
"""
r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip()
r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
return '' != r
def table(self, text):
r"""
>>> t = Textile()
>>> t.table('|one|two|three|\n|a|b|c|')
'\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>\n\n'
"""
text = text + "\n\n"
pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {'s':self.s, 'a':self.a, 'c':self.c}, re.S|re.M|re.U)
return pattern.sub(self.fTable, text)
def fTable(self, match):
tatts = self.pba(match.group(1), 'table')
rows = []
for row in [ x for x in match.group(2).split('\n') if x]:
rmtch = re.search(r'^(%s%s\. )(.*)' % (self.a, self.c), row.lstrip())
if rmtch:
ratts = self.pba(rmtch.group(1), 'tr')
row = rmtch.group(2)
else: ratts = ''
cells = []
for cell in row.split('|'):
ctyp = 'd'
if re.search(r'^_', cell): ctyp = "h"
cmtch = re.search(r'^(_?%s%s%s\. )(.*)' % (self.s, self.a, self.c), cell)
if cmtch:
catts = self.pba(cmtch.group(1), 'td')
cell = cmtch.group(2)
else: catts = ''
cell = self.graf(self.span(cell))
if cell.strip() != '':
cells.append('\t\t\t<t%s%s>%s</t%s>' % (ctyp, catts, cell, ctyp))
rows.append("\t\t<tr%s>\n%s\n\t\t</tr>" % (ratts, '\n'.join(cells)))
cells = []
catts = None
return "\t<table%s>\n%s\n\t</table>\n\n" % (tatts, '\n'.join(rows))
def lists(self, text):
"""
>>> t = Textile()
>>> t.lists("* one\\n* two\\n* three")
'\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
"""
pattern = re.compile(r'^([#*]+%s .*)$(?![^#*])' % self.c, re.U|re.M|re.S)
return pattern.sub(self.fList, text)
def fList(self, match):
text = match.group(0).split("\n")
result = []
lists = []
for i, line in enumerate(text):
try:
nextline = text[i+1]
except IndexError:
nextline = ''
m = re.search(r"^([#*]+)(%s%s) (.*)$" % (self.a, self.c), line, re.S)
if m:
tl, atts, content = m.groups()
nl = ''
nm = re.search(r'^([#*]+)\s.*', nextline)
if nm:
nl = nm.group(1)
if tl not in lists:
lists.append(tl)
atts = self.pba(atts)
line = "\t<%sl%s>\n\t\t<li>%s" % (self.lT(tl), atts, self.graf(content))
else:
line = "\t\t<li>" + self.graf(content)
if len(nl) <= len(tl): line = line + "</li>"
for k in reversed(lists):
if len(k) > len(nl):
line = line + "\n\t</%sl>" % self.lT(k)
if len(k) > 1:
line = line + "</li>"
lists.remove(k)
result.append(line)
return "\n".join(result)
def lT(self, input):
# '#' list markers produce ordered lists ('ol'), '*' unordered ('ul')
if re.search(r'^#+', input):
return 'o'
else:
return 'u'
def doPBr(self, in_):
# run each <p> block through doBr to convert inner newlines
return re.compile(r'<(p)([^>]*?)>(.*)(</\1>)', re.S).sub(self.doBr, in_)
def doBr(self, match):
# turn bare newlines inside a paragraph into <br /> tags
content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br />', match.group(3))
return '<%s%s>%s%s' % (match.group(1), match.group(2), content, match.group(4))
def block(self, text):
"""
>>> t = Textile()
>>> t.block('h1. foobar baby')
'\\t<h1>foobar baby</h1>'
"""
tre = '|'.join(self.btag)
text = text.split('\n\n')
tag = 'p'
atts = cite = graf = ext = ''
out = []
anon = False
for line in text:
pattern = r'^(%s)(%s%s)\.(\.?)(?::(\S+))? (.*)$' % (tre, self.a, self.c)
match = re.search(pattern, line, re.S)
if match:
if ext:
out.append(out.pop() + c1)
tag,atts,ext,cite,graf = match.groups()
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, graf)
# leave off c1 if this block is extended, we'll close it at the start of the next block
if ext:
line = "%s%s%s%s" % (o1, o2, content, c2)
else:
line = "%s%s%s%s%s" % (o1, o2, content, c2, c1)
else:
anon = True
if ext or not re.search(r'^\s', line):
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, line)
# skip $o1/$c1 because this is part of a continuing extended block
if tag == 'p' and not self.hasRawText(content):
line = content
else:
line = "%s%s%s" % (o2, content, c2)
else:
line = self.graf(line)
line = self.doPBr(line)
line = re.sub(r'<br>', '<br />', line)
if ext and anon:
out.append(out.pop() + "\n" + line)
else:
out.append(line)
if not ext:
tag = 'p'
atts = ''
cite = ''
graf = ''
if ext:
out.append(out.pop() + c1)
return '\n\n'.join(out)
def fBlock(self, tag, atts, ext, cite, content):
"""
>>> t = Textile()
>>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bq", "", None, "http://google.com", "Hello BlockQuote")
('\\t<blockquote cite="http://google.com">\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bc", "", None, "", 'printf "Hello, World";') # doctest: +ELLIPSIS
('<pre>', '<code>', ..., '</code>', '</pre>')
>>> t.fBlock("h1", "", None, "", "foobar")
('', '\\t<h1>', 'foobar', '</h1>', '')
"""
atts = self.pba(atts)
o1 = o2 = c2 = c1 = ''
m = re.search(r'fn(\d+)', tag)
if m:
tag = 'p'
if m.group(1) in self.fn:
fnid = self.fn[m.group(1)]
else:
fnid = m.group(1)
atts = atts + ' id="fn%s"' % fnid
if atts.find('class=') < 0:
atts = atts + ' class="footnote"'
content = ('<sup>%s</sup>' % m.group(1)) + content
if tag == 'bq':
cite = self.checkRefs(cite)
if cite:
cite = ' cite="%s"' % cite
else:
cite = ''
o1 = "\t<blockquote%s%s>\n" % (cite, atts)
o2 = "\t\t<p%s>" % atts
c2 = "</p>"
c1 = "\n\t</blockquote>"
elif tag == 'bc':
o1 = "<pre%s>" % atts
o2 = "<code%s>" % atts
c2 = "</code>"
c1 = "</pre>"
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
elif tag == 'notextile':
content = self.shelve(content)
o1 = o2 = ''
c1 = c2 = ''
elif tag == 'pre':
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
o1 = "<pre%s>" % atts
o2 = c2 = ''
c1 = '</pre>'
else:
o2 = "\t<%s%s>" % (tag, atts)
c2 = "</%s>" % tag
content = self.graf(content)
return o1, o2, content, c2, c1
def footnoteRef(self, text):
"""
>>> t = Textile()
>>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
"""
return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
def footnoteID(self, match):
id, t = match.groups()
if id not in self.fn:
self.fn[id] = str(uuid.uuid4())
fnid = self.fn[id]
if not t: t = ''
return '<sup class="footnote"><a href="#fn%s">%s</a></sup>%s' % (fnid, id, t)
def glyphs(self, text):
"""
>>> t = Textile()
>>> t.glyphs("apostrophe's")
'apostrophe’s'
>>> t.glyphs("back in '88")
'back in ’88'
>>> t.glyphs('foo ...')
'foo …'
>>> t.glyphs('--')
'—'
>>> t.glyphs('FooBar[tm]')
'FooBar™'
>>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
'<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
"""
# fix: hackish, pad a trailing double quote so the closing-quote regex fires
text = re.sub(r'"\Z', '\" ', text)
glyph_search = (
re.compile(r"(\w)\'(\w)"), # apostrophe's
re.compile(r'(\s)\'(\d+\w?)\b(?!\')'), # back in '88
re.compile(r'(\S)\'(?=\s|'+self.pnct+'|<|$)'), # single closing
re.compile(r'\'/'), # single opening
re.compile(r'(\S)\"(?=\s|'+self.pnct+'|<|$)'), # double closing
re.compile(r'"'), # double opening
re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'), # 3+ uppercase acronym
re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'), # 3+ uppercase
re.compile(r'\b(\s{0,1})?\.{3}'), # ellipsis
re.compile(r'(\s?)--(\s?)'), # em dash
re.compile(r'\s-(?:\s|$)'), # en dash
re.compile(r'(\d+)( ?)x( ?)(?=\d+)'), # dimension sign
re.compile(r'\b ?[([]TM[])]', re.I), # trademark
re.compile(r'\b ?[([]R[])]', re.I), # registered
re.compile(r'\b ?[([]C[])]', re.I), # copyright
)
glyph_replace = [x % dict(self.glyph_defaults) for x in (
r'\1%(txt_apostrophe)s\2', # apostrophe's
r'\1%(txt_apostrophe)s\2', # back in '88
r'\1%(txt_quote_single_close)s', # single closing
r'%(txt_quote_single_open)s', # single opening
r'\1%(txt_quote_double_close)s', # double closing
r'%(txt_quote_double_open)s', # double opening
r'<acronym title="\2">\1</acronym>', # 3+ uppercase acronym
r'<span class="caps">\1</span>', # 3+ uppercase
r'\1%(txt_ellipsis)s', # ellipsis
r'\1%(txt_emdash)s\2', # em dash
r' %(txt_endash)s ', # en dash
r'\1\2%(txt_dimension)s\3', # dimension sign
r'%(txt_trademark)s', # trademark
r'%(txt_registered)s', # registered
r'%(txt_copyright)s', # copyright
)]
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
for s, r in zip(glyph_search, glyph_replace):
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def iAlign(self, input):
d = {'<':'left', '=':'center', '>':'right'}
return d.get(input, '')
def vAlign(self, input):
d = {'^':'top', '-':'middle', '~':'bottom'}
return d.get(input, '')
def hAlign(self, input):
d = {'<':'left', '=':'center', '>':'right', '<>': 'justify'}
return d.get(input, '')
def getRefs(self, text):
"""
captures and removes link aliases of the form [alias]http://url,
recording them in self.urlrefs for later lookup via checkRefs()
"""
pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http:\/\/|\/)\S+)(?=\s|$)', re.U)
text = pattern.sub(self.refs, text)
return text
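# Example of the alias syntax consumed above (derived from the regex):
#   getRefs('see [quick]http://www.quick.com') leaves 'see ' behind and
#   records self.urlrefs['quick'] = 'http://www.quick.com'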
def refs(self, match):
flag, url = match.groups()
self.urlrefs[flag] = url
return ''
def checkRefs(self, url):
return self.urlrefs.get(url, url)
def relURL(self, url):
o = urlparse(url)
if (not o.scheme or o.scheme == 'http') and not o.netloc and re.search(r'^\w', o.path):
url = self.hu + url
if self.restricted and o.scheme and o.scheme not in self.url_schemes:
return '#'
return url
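# Behaviour sketch (derived from the code above): a bare relative path such
# as 'imgs/photo.jpg' gets self.hu prefixed; in restricted mode any scheme
# outside url_schemes (e.g. 'javascript:') collapses to '#'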
def shelve(self, text):
id = str(uuid.uuid4())
self.shelf[id] = text
return id
def retrieve(self, text):
"""
>>> t = Textile()
>>> id = t.shelve("foobar")
>>> t.retrieve(id)
'foobar'
"""
while True:
old = text
for k,v in self.shelf.items():
text = text.replace(k,v)
if text == old: break
return text
def encode_html(self, text, quotes=True):
a = (
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;')
)
if quotes:
a = a + (
("'", '&#39;'),
('"', '&#34;')
)
for k,v in a:
text = text.replace(k,v)
return text
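# Example (assuming the entity table reconstructed above):
#   Textile().encode_html('<p>"x" & y</p>')
#   returns '&lt;p&gt;&#34;x&#34; &amp; y&lt;/p&gt;'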
def graf(self, text):
if not self.lite:
text = self.noTextile(text)
text = self.code(text)
text = self.links(text)
if not self.noimage:
text = self.image(text)
if not self.lite:
text = self.lists(text)
text = self.table(text)
text = self.span(text)
text = self.footnoteRef(text)
text = self.glyphs(text)
return text.rstrip('\n')
def links(self, text):
"""
>>> t = Textile()
>>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
'fooobar ... and hello world ...'
"""
punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'
pattern = r'''
([\s\[{(]|[%s])? # $pre
" # start
(%s) # $atts
([^"]+?) # $text
\s?
(?:\(([^)]+?)\)(?="))? # $title
":
(\S+?) # $url
(\/)? # $slash
([^\w\/;]*?) # $post
(?=<|\s|$)
''' % (re.escape(punct), self.c)
text = re.compile(pattern, re.X).sub(self.fLink, text)
return text
def fLink(self, match):
pre, atts, text, title, url, slash, post = match.groups()
# print "## ", zip("pre, atts, text, title, url, slash, post".split(","), match.groups())
# print
if pre == None:
pre = ''
url = self.checkRefs(url)
atts = self.pba(atts)
if title: atts = atts + ' title="%s"' % self.encode_html(title)
if not self.noimage:
text = self.image(text)
text = self.span(text)
text = self.glyphs(text)
url = self.relURL(url)
if slash: url = url + slash
out = '<a href="%s"%s%s>%s</a>' % (self.encode_html(url), atts, self.rel, text)
out = self.shelve(out)
return ''.join([pre, out, post])
def span(self, text):
"""
>>> t = Textile()
>>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
"""
qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
pnct = ".,\"'?!;:"
for qtag in qtags:
pattern = re.compile(r"""
(?:^|(?<=[\s>%(pnct)s])|([\]}]))
(%(qtag)s)(?!%(qtag)s)
(%(c)s)
(?::(\S+))?
([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
([%(pnct)s]*)
%(qtag)s
(?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
""" % {'qtag':qtag,'c':self.c,'pnct':pnct,'selfpnct':self.pnct}, re.X)
text = pattern.sub(self.fSpan, text)
return text
def fSpan(self, match):
_, tag, atts, cite, content, end, _ = match.groups()
qtags = {
'*': 'strong',
'**': 'b',
'??': 'cite',
'_' : 'em',
'__': 'i',
'-' : 'del',
'%' : 'span',
'+' : 'ins',
'~' : 'sub',
'^' : 'sup'
}
tag = qtags[tag]
atts = self.pba(atts)
if cite:
atts = atts + ' cite="%s"' % cite
out = "<%s%s>%s%s</%s>" % (tag, atts, content, end, tag)
return out
def image(self, text):
"""
>>> t = Textile()
>>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
'<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
"""
pattern = re.compile(r"""
(?:[\[{])? # pre
\! # opening !
(\<|\=|\>)?? # optional alignment atts
(%s) # optional style,class atts
(?:\. )? # optional dot-space
([^\s(!]+) # presume this is the src
\s? # optional space
(?:\(([^\)]+)\))? # optional title
\! # closing
(?::(\S+))? # optional href
(?:[\]}]|(?=\s|$)) # lookahead: space or end of string
""" % self.c, re.U|re.X)
return pattern.sub(self.fImage, text)
def fImage(self, match):
# (None, '', '/imgs/myphoto.jpg', None, None)
algn, atts, url, title, href = match.groups()
atts = self.pba(atts)
if algn:
atts = atts + ' align="%s"' % self.iAlign(algn)
if title:
atts = atts + ' title="%s" alt="%s"' % (title, title)
else:
atts = atts + ' alt=""'
# TODO how to do this in python?
# size = @getimagesize(url)
# if (size) atts .= " size[3]"
if href:
href = self.checkRefs(href)
url = self.checkRefs(url)
url = self.relURL(url)
out = []
if href: out.append('<a href="%s">' % href)
out.append('<img src="%s"%s />' % (url, atts))
if href: out.append('</a>')
return ''.join(out)
def code(self, text):
text = self.doSpecial(text, '<code>', '</code>', self.fCode)
text = self.doSpecial(text, '@', '@', self.fCode)
text = self.doSpecial(text, '<pre>', '</pre>', self.fPre)
return text
def fCode(self, match):
before, text, after = match.groups()
if after == None: after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, self.shelve('<code>%s</code>' % text), after])
def fPre(self, match):
before, text, after = match.groups()
if after == None: after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, '<pre>', self.shelve(text), '</pre>', after])
def doSpecial(self, text, start, end, method=None):
if method == None:
method = self.fSpecial
pattern = re.compile(r'(^|\s|[\[({>])%s(.*?)%s(\s|$|[\])}])?' % (re.escape(start), re.escape(end)), re.M|re.S)
return pattern.sub(method, text)
def fSpecial(self, match):
"""
special blocks like notextile or code
"""
before, text, after = match.groups()
if after == None: after = ''
return ''.join([before, self.shelve(self.encode_html(text)), after])
def noTextile(self, text):
text = self.doSpecial(text, '<notextile>', '</notextile>', self.fTextile)
return self.doSpecial(text, '==', '==', self.fTextile)
def fTextile(self, match):
before, notextile, after = match.groups()
if after == None: after = ''
return ''.join([before, self.shelve(notextile), after])
def textile(text, **args):
"""
this function takes additional parameters:
encoding - input encoding (default: 'utf-8')
output - output encoding (default: 'utf-8')
validate - perform mxTidy or uTidyLib validation (default: False)
sanitize - sanitize output good for weblog comments (default: False)
head_offset - ignored
"""
return Textile().textile(text, **args)
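# Minimal usage of this module-level helper (mirrors the doctests above):
#   >>> textile('h1. foobar baby')
#   '\t<h1>foobar baby</h1>'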
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
import sys
if len(sys.argv) == 2:
f = open(sys.argv[1])
text = ''.join(f.readlines())
print Textile().textile(text)
else:
_test()
| Python |
#!/usr/bin/env python
from distutils.core import setup
setup(name='textile',
version='2.1.2',
description='This is Textile. A Humane Web Text Generator.',
author='Jason Samsa',
author_email='jsamsa@gmail.com',
url='http://loopcore.com/python-textile/',
py_modules=['textile'],
platforms = ['any'],
license = ['BSD'],
long_description = """Textile is a XHTML generator using a simple markup developed by Dean Allen."""
)
| Python |
#!/usr/bin/env python
"""
PyTextile
A Humane Web Text Generator
"""
__version__ = '2.1.2'
__date__ = '2008/11/30'
__copyright__ = """
Copyright (c) 2008, Jason Samsa, http://jsamsa.com/
Copyright (c) 2004, Roberto A. F. De Almeida, http://dealmeida.net/
Copyright (c) 2003, Mark Pilgrim, http://diveintomark.org/
Original PHP Version:
Copyright (c) 2003-2004, Dean Allen <dean@textism.com>
All rights reserved.
Thanks to Carlo Zottmann <carlo@g-blog.net> for refactoring
Textile's procedural code into a class framework
Additions and fixes Copyright (c) 2006 Alex Shiels http://thresholdstate.com/
"""
__license__ = """
L I C E N S E
=============
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name Textile nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import re
import uuid
from urlparse import urlparse
import sgmllib
def _normalize_newlines(string):
import re
return re.sub(r'(\r\n|\r|\n)', '\n', string)
# PyTextile can optionally sanitize the generated XHTML,
# which is good for weblog comments. This code is from
# Mark Pilgrim's feedparser.
class _BaseHTMLProcessor(sgmllib.SGMLParser):
elements_no_end_tag = ['area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
'img', 'input', 'isindex', 'link', 'meta', 'param']
def __init__(self):
sgmllib.SGMLParser.__init__(self)
def reset(self):
self.pieces = []
sgmllib.SGMLParser.reset(self)
def normalize_attrs(self, attrs):
# utility method to be called by descendants
attrs = [(k.lower(), sgmllib.charref.sub(lambda m: unichr(int(m.groups()[0])), v).strip()) for k, v in attrs]
attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs]
return attrs
def unknown_starttag(self, tag, attrs):
# called for each start tag
# attrs is a list of (attr, value) tuples
# e.g. for <pre class="screen">, tag="pre", attrs=[("class", "screen")]
strattrs = "".join([' %s="%s"' % (key, value) for key, value in attrs])
if tag in self.elements_no_end_tag:
self.pieces.append("<%(tag)s%(strattrs)s />" % locals())
else:
self.pieces.append("<%(tag)s%(strattrs)s>" % locals())
def unknown_endtag(self, tag):
# called for each end tag, e.g. for </pre>, tag will be "pre"
# Reconstruct the original end tag.
if tag not in self.elements_no_end_tag:
self.pieces.append("</%(tag)s>" % locals())
def handle_charref(self, ref):
# called for each character reference, e.g. for " ", ref will be "160"
# Reconstruct the original character reference.
self.pieces.append("&#%(ref)s;" % locals())
def handle_entityref(self, ref):
# called for each entity reference, e.g. for "©", ref will be "copy"
# Reconstruct the original entity reference.
self.pieces.append("&%(ref)s;" % locals())
def handle_data(self, text):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
# Store the original text verbatim.
self.pieces.append(text)
def handle_comment(self, text):
# called for each HTML comment, e.g. <!-- insert Javascript code here -->
# Reconstruct the original comment.
self.pieces.append("<!--%(text)s-->" % locals())
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
# Reconstruct original processing instruction.
self.pieces.append("<?%(text)s>" % locals())
def handle_decl(self, text):
# called for the DOCTYPE, if present, e.g.
# <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
# "http://www.w3.org/TR/html4/loose.dtd">
# Reconstruct original DOCTYPE
self.pieces.append("<!%(text)s>" % locals())
def output(self):
"""Return processed HTML as a single string"""
return "".join(self.pieces)
class _HTMLSanitizer(_BaseHTMLProcessor):
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', 'b', 'big',
'blockquote', 'br', 'button', 'caption', 'center', 'cite', 'code', 'col',
'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt', 'em', 'fieldset',
'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input',
'ins', 'kbd', 'label', 'legend', 'li', 'map', 'menu', 'ol', 'optgroup',
'option', 'p', 'pre', 'q', 's', 'samp', 'select', 'small', 'span', 'strike',
'strong', 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'tfoot', 'th',
'thead', 'tr', 'tt', 'u', 'ul', 'var']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'border', 'cellpadding', 'cellspacing',
'char', 'charoff', 'charset', 'checked', 'cite', 'class', 'clear', 'cols',
'colspan', 'color', 'compact', 'coords', 'datetime', 'dir', 'disabled',
'enctype', 'for', 'frame', 'headers', 'height', 'href', 'hreflang', 'hspace',
'id', 'ismap', 'label', 'lang', 'longdesc', 'maxlength', 'media', 'method',
'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'prompt', 'readonly',
'rel', 'rev', 'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title', 'type',
'usemap', 'valign', 'value', 'vspace', 'width']
unacceptable_elements_with_end_tag = ['script', 'applet']
# This if for MathML.
mathml_elements = ['math', 'mi', 'mn', 'mo', 'mrow', 'msup']
mathml_attributes = ['mode', 'xmlns']
acceptable_elements = acceptable_elements + mathml_elements
acceptable_attributes = acceptable_attributes + mathml_attributes
def reset(self):
_BaseHTMLProcessor.reset(self)
self.unacceptablestack = 0
def unknown_starttag(self, tag, attrs):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack += 1
return
attrs = self.normalize_attrs(attrs)
attrs = [(key, value) for key, value in attrs if key in self.acceptable_attributes]
_BaseHTMLProcessor.unknown_starttag(self, tag, attrs)
def unknown_endtag(self, tag):
if not tag in self.acceptable_elements:
if tag in self.unacceptable_elements_with_end_tag:
self.unacceptablestack -= 1
return
_BaseHTMLProcessor.unknown_endtag(self, tag)
def handle_pi(self, text):
pass
def handle_decl(self, text):
pass
def handle_data(self, text):
if not self.unacceptablestack:
_BaseHTMLProcessor.handle_data(self, text)
# PyTextile can optionally validate the generated
# XHTML code using either mxTidy or uTidyLib.
try:
# This is mxTidy.
from mx.Tidy import Tidy
def _tidy1(text):
"""mxTidy's XHTML validator.
This function is a wrapper to mxTidy's validator.
"""
nerrors, nwarnings, text, errortext = Tidy.tidy(text, output_xhtml=1, numeric_entities=1, wrap=0)
return _in_tag(text, 'body')
_tidy = _tidy1
except ImportError:
try:
# This is uTidyLib.
import tidy
def _tidy2(text):
"""uTidyLib's XHTML validator.
This function is a wrapper to uTidyLib's validator.
"""
text = tidy.parseString(text, output_xhtml=1, add_xml_decl=0, indent=0, tidy_mark=0)
return _in_tag(str(text), 'body')
_tidy = _tidy2
except ImportError:
_tidy = None
class Textile(object):
hlgn = r'(?:\<(?!>)|(?<!<)\>|\<\>|\=|[()]+(?! ))'
vlgn = r'[\-^~]'
clas = r'(?:\([^)]+\))'
lnge = r'(?:\[[^\]]+\])'
styl = r'(?:\{[^}]+\})'
cspn = r'(?:\\\\\d+)'
rspn = r'(?:\/\d+)'
a = r'(?:%s|%s)*' % (hlgn, vlgn)
s = r'(?:%s|%s)*' % (cspn, rspn)
c = r'(?:%s)*' % '|'.join([clas, styl, lnge, hlgn])
pnct = r'[-!"#$%&()*+,/:;<=>?@\'\[\\\]\.^_`{|}~]'
# urlch = r'[\w"$\-_.+!*\'(),";/?:@=&%#{}|\\^~\[\]`]'
urlch = '[\w"$\-_.+*\'(),";\/?:@=&%#{}|\\^~\[\]`]'
url_schemes = ('http','https','ftp','mailto')
btag = ('bq', 'bc', 'notextile', 'pre', 'h[1-6]', 'fn\d+', 'p')
noimage = False
hu = ''
glyph_defaults = (
('txt_quote_single_open', '‘'),
('txt_quote_single_close', '’'),
('txt_quote_double_open', '“'),
('txt_quote_double_close', '”'),
('txt_apostrophe', '’'),
('txt_prime', '′'),
('txt_prime_double', '″'),
('txt_ellipsis', '…'),
('txt_emdash', '—'),
('txt_endash', '–'),
('txt_dimension', '×'),
('txt_trademark', '™'),
('txt_registered', '®'),
('txt_copyright', '©'),
)
def __init__(self, restricted=False, lite=False):
"""docstring for __init__"""
self.restricted = restricted
self.lite = lite
self.fn = {}
self.urlrefs = {}
self.shelf = {}
self.rel = ''
def textile(self, text, rel=None, encoding='utf8', output='utf8', validate=False, sanitize=False, head_offset='ignored'):
"""
>>> import textile
>>> textile.textile('some textile')
'\\t<p>some textile</p>'
"""
text = _normalize_newlines(text)
if rel:
self.rel = ' rel="%s"' % rel
text = self.getRefs(text)
if not self.lite:
text = self.block(text)
text = self.retrieve(text)
# Convert to desired output.
if isinstance(text, str):
text = unicode(text, encoding)
text = text.encode(output, 'xmlcharrefreplace')
# Sanitize?
if sanitize:
p = _HTMLSanitizer()
p.feed(text)
text = p.output()
# Validate output.
if _tidy and validate:
text = _tidy(text)
return text
def pba(self, input, element=None):
"""
>>> t = Textile()
>>> t.pba(r'\3')
''
>>> t.pba(r'\\3', element='td')
' colspan="3"'
>>> t.pba(r'/4', element='td')
' rowspan="4"'
>>> t.pba(r'\\3/4', element='td')
' colspan="3" rowspan="4"'
>>> t.vAlign('^')
'top'
>>> t.pba('^', element='td')
' style="vertical-align:top;"'
>>> t.pba('{line-height:18px}')
' style="line-height:18px;"'
>>> t.pba('(foo-bar)')
' class="foo-bar"'
>>> t.pba('(#myid)')
' id="myid"'
>>> t.pba('(foo-bar#myid)')
' class="foo-bar" id="myid"'
>>> t.pba('((((')
' style="padding-left:4em;"'
>>> t.pba(')))')
' style="padding-right:3em;"'
>>> t.pba('[fr]')
' lang="fr"'
"""
style = []
aclass = ''
lang = ''
colspan = ''
rowspan = ''
id = ''
atts = ''
if not input: return ''
matched = input
if element == 'td':
m = re.search(r'\\(\d+)', matched)
if m:
colspan = m.group(1)
m = re.search(r'/(\d+)', matched)
if m:
rowspan = m.group(1)
if element == 'td' or element == 'tr':
m = re.search(r'(%s)' % self.vlgn, matched)
if m: style.append("vertical-align:%s;" % self.vAlign(m.group(1)))
m = re.search(r'\{([^}]*)\}', matched)
if m:
style.append(m.group(1).rstrip(';') + ';')
matched = matched.replace(m.group(0), '')
m = re.search(r'\[([^\]]+)\]', matched, re.U)
if m:
lang = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'\(([^()]+)\)', matched, re.U)
if m:
aclass = m.group(1)
matched = matched.replace(m.group(0), '')
m = re.search(r'([(]+)', matched)
if m:
style.append("padding-left:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'([)]+)', matched)
if m:
style.append("padding-right:%sem;" % len(m.group(1)))
matched = matched.replace(m.group(0), '')
m = re.search(r'(%s)' % self.hlgn, matched)
if m:
style.append("text-align:%s;" % self.hAlign(m.group(1)))
m = re.search(r'^(.*)#(.*)$', aclass)
if m:
id = m.group(2)
aclass = m.group(1)
if self.restricted:
if lang: return ' lang="%s"' % lang
else: return ''
result = []
if style: result.append(' style="%s"' % "".join(style))
if aclass: result.append(' class="%s"' % aclass)
if lang: result.append(' lang="%s"' % lang)
if id: result.append(' id="%s"' % id)
if colspan: result.append(' colspan="%s"' % colspan)
if rowspan: result.append(' rowspan="%s"' % rowspan)
return ''.join(result)
def hasRawText(self, text):
"""
checks whether the text has text not already enclosed by a block tag
>>> t = Textile()
>>> t.hasRawText('<p>foo bar biz baz</p>')
False
>>> t.hasRawText(' why yes, yes it does')
True
"""
r = re.compile(r'<(p|blockquote|div|form|table|ul|ol|pre|h\d)[^>]*?>.*</\1>', re.S).sub('', text.strip()).strip()
r = re.compile(r'<(hr|br)[^>]*?/>').sub('', r)
return '' != r
def table(self, text):
r"""
>>> t = Textile()
>>> t.table('|one|two|three|\n|a|b|c|')
'\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>\n\n'
"""
text = text + "\n\n"
pattern = re.compile(r'^(?:table(_?%(s)s%(a)s%(c)s)\. ?\n)?^(%(a)s%(c)s\.? ?\|.*\|)\n\n' % {'s':self.s, 'a':self.a, 'c':self.c}, re.S|re.M|re.U)
return pattern.sub(self.fTable, text)
def fTable(self, match):
tatts = self.pba(match.group(1), 'table')
rows = []
for row in [ x for x in match.group(2).split('\n') if x]:
rmtch = re.search(r'^(%s%s\. )(.*)' % (self.a, self.c), row.lstrip())
if rmtch:
ratts = self.pba(rmtch.group(1), 'tr')
row = rmtch.group(2)
else: ratts = ''
cells = []
for cell in row.split('|'):
ctyp = 'd'
if re.search(r'^_', cell): ctyp = "h"
cmtch = re.search(r'^(_?%s%s%s\. )(.*)' % (self.s, self.a, self.c), cell)
if cmtch:
catts = self.pba(cmtch.group(1), 'td')
cell = cmtch.group(2)
else: catts = ''
cell = self.graf(self.span(cell))
if cell.strip() != '':
cells.append('\t\t\t<t%s%s>%s</t%s>' % (ctyp, catts, cell, ctyp))
rows.append("\t\t<tr%s>\n%s\n\t\t</tr>" % (ratts, '\n'.join(cells)))
cells = []
catts = None
return "\t<table%s>\n%s\n\t</table>\n\n" % (tatts, '\n'.join(rows))
def lists(self, text):
"""
>>> t = Textile()
>>> t.lists("* one\\n* two\\n* three")
'\\t<ul>\\n\\t\\t<li>one</li>\\n\\t\\t<li>two</li>\\n\\t\\t<li>three</li>\\n\\t</ul>'
"""
pattern = re.compile(r'^([#*]+%s .*)$(?![^#*])' % self.c, re.U|re.M|re.S)
return pattern.sub(self.fList, text)
def fList(self, match):
text = match.group(0).split("\n")
result = []
lists = []
for i, line in enumerate(text):
try:
nextline = text[i+1]
except IndexError:
nextline = ''
m = re.search(r"^([#*]+)(%s%s) (.*)$" % (self.a, self.c), line, re.S)
if m:
tl, atts, content = m.groups()
nl = ''
nm = re.search(r'^([#*]+)\s.*', nextline)
if nm:
nl = nm.group(1)
if tl not in lists:
lists.append(tl)
atts = self.pba(atts)
line = "\t<%sl%s>\n\t\t<li>%s" % (self.lT(tl), atts, self.graf(content))
else:
line = "\t\t<li>" + self.graf(content)
if len(nl) <= len(tl): line = line + "</li>"
for k in reversed(lists):
if len(k) > len(nl):
line = line + "\n\t</%sl>" % self.lT(k)
if len(k) > 1:
line = line + "</li>"
lists.remove(k)
result.append(line)
return "\n".join(result)
def lT(self, input):
if re.search(r'^#+', input):
return 'o'
else:
return 'u'
def doPBr(self, in_):
return re.compile(r'<(p)([^>]*?)>(.*)(</\1>)', re.S).sub(self.doBr, in_)
def doBr(self, match):
content = re.sub(r'(.+)(?:(?<!<br>)|(?<!<br />))\n(?![#*\s|])', '\\1<br />', match.group(3))
return '<%s%s>%s%s' % (match.group(1), match.group(2), content, match.group(4))
def block(self, text):
"""
>>> t = Textile()
>>> t.block('h1. foobar baby')
'\\t<h1>foobar baby</h1>'
"""
tre = '|'.join(self.btag)
text = text.split('\n\n')
tag = 'p'
atts = cite = graf = ext = ''
out = []
anon = False
for line in text:
pattern = r'^(%s)(%s%s)\.(\.?)(?::(\S+))? (.*)$' % (tre, self.a, self.c)
match = re.search(pattern, line, re.S)
if match:
if ext:
out.append(out.pop() + c1)
tag,atts,ext,cite,graf = match.groups()
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, graf)
# leave off c1 if this block is extended, we'll close it at the start of the next block
if ext:
line = "%s%s%s%s" % (o1, o2, content, c2)
else:
line = "%s%s%s%s%s" % (o1, o2, content, c2, c1)
else:
anon = True
if ext or not re.search(r'^\s', line):
o1, o2, content, c2, c1 = self.fBlock(tag, atts, ext, cite, line)
# skip $o1/$c1 because this is part of a continuing extended block
if tag == 'p' and not self.hasRawText(content):
line = content
else:
line = "%s%s%s" % (o2, content, c2)
else:
line = self.graf(line)
line = self.doPBr(line)
line = re.sub(r'<br>', '<br />', line)
if ext and anon:
out.append(out.pop() + "\n" + line)
else:
out.append(line)
if not ext:
tag = 'p'
atts = ''
cite = ''
graf = ''
if ext:
out.append(out.pop() + c1)
return '\n\n'.join(out)
def fBlock(self, tag, atts, ext, cite, content):
"""
>>> t = Textile()
>>> t.fBlock("bq", "", None, "", "Hello BlockQuote")
('\\t<blockquote>\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bq", "", None, "http://google.com", "Hello BlockQuote")
('\\t<blockquote cite="http://google.com">\\n', '\\t\\t<p>', 'Hello BlockQuote', '</p>', '\\n\\t</blockquote>')
>>> t.fBlock("bc", "", None, "", 'printf "Hello, World";') # doctest: +ELLIPSIS
('<pre>', '<code>', ..., '</code>', '</pre>')
>>> t.fBlock("h1", "", None, "", "foobar")
('', '\\t<h1>', 'foobar', '</h1>', '')
"""
atts = self.pba(atts)
o1 = o2 = c2 = c1 = ''
m = re.search(r'fn(\d+)', tag)
if m:
tag = 'p'
if m.group(1) in self.fn:
fnid = self.fn[m.group(1)]
else:
fnid = m.group(1)
atts = atts + ' id="fn%s"' % fnid
if atts.find('class=') < 0:
atts = atts + ' class="footnote"'
content = ('<sup>%s</sup>' % m.group(1)) + content
if tag == 'bq':
cite = self.checkRefs(cite)
if cite:
cite = ' cite="%s"' % cite
else:
cite = ''
o1 = "\t<blockquote%s%s>\n" % (cite, atts)
o2 = "\t\t<p%s>" % atts
c2 = "</p>"
c1 = "\n\t</blockquote>"
elif tag == 'bc':
o1 = "<pre%s>" % atts
o2 = "<code%s>" % atts
c2 = "</code>"
c1 = "</pre>"
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
elif tag == 'notextile':
content = self.shelve(content)
o1 = o2 = ''
c1 = c2 = ''
elif tag == 'pre':
content = self.shelve(self.encode_html(content.rstrip("\n") + "\n"))
o1 = "<pre%s>" % atts
o2 = c2 = ''
c1 = '</pre>'
else:
o2 = "\t<%s%s>" % (tag, atts)
c2 = "</%s>" % tag
content = self.graf(content)
return o1, o2, content, c2, c1
def footnoteRef(self, text):
"""
>>> t = Textile()
>>> t.footnoteRef('foo[1] ') # doctest: +ELLIPSIS
'foo<sup class="footnote"><a href="#fn...">1</a></sup> '
"""
return re.sub(r'\b\[([0-9]+)\](\s)?', self.footnoteID, text)
def footnoteID(self, match):
id, t = match.groups()
if id not in self.fn:
self.fn[id] = str(uuid.uuid4())
fnid = self.fn[id]
if not t: t = ''
return '<sup class="footnote"><a href="#fn%s">%s</a></sup>%s' % (fnid, id, t)
def glyphs(self, text):
"""
>>> t = Textile()
>>> t.glyphs("apostrophe's")
'apostrophe’s'
>>> t.glyphs("back in '88")
'back in ’88'
>>> t.glyphs('foo ...')
'foo …'
>>> t.glyphs('--')
'—'
>>> t.glyphs('FooBar[tm]')
'FooBar™'
>>> t.glyphs("<p><cite>Cat's Cradle</cite> by Vonnegut</p>")
'<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'
"""
# fix: hackish -- pad a trailing double quote so the closing-quote rule
# below can still see it (\Z anchors at end of string; the original \z
# is not a valid Python regex escape)
text = re.sub(r'"\Z', '\" ', text)
glyph_search = (
re.compile(r"(\w)\'(\w)"), # apostrophe's
re.compile(r'(\s)\'(\d+\w?)\b(?!\')'), # back in '88
re.compile(r'(\S)\'(?=\s|'+self.pnct+'|<|$)'), # single closing
re.compile(r'\'/'), # single opening
re.compile(r'(\S)\"(?=\s|'+self.pnct+'|<|$)'), # double closing
re.compile(r'"'), # double opening
re.compile(r'\b([A-Z][A-Z0-9]{2,})\b(?:[(]([^)]*)[)])'), # 3+ uppercase acronym
re.compile(r'\b([A-Z][A-Z\'\-]+[A-Z])(?=[\s.,\)>])'), # 3+ uppercase
re.compile(r'\b(\s{0,1})?\.{3}'), # ellipsis
re.compile(r'(\s?)--(\s?)'), # em dash
re.compile(r'\s-(?:\s|$)'), # en dash
re.compile(r'(\d+)( ?)x( ?)(?=\d+)'), # dimension sign
re.compile(r'\b ?[([]TM[])]', re.I), # trademark
re.compile(r'\b ?[([]R[])]', re.I), # registered
re.compile(r'\b ?[([]C[])]', re.I), # copyright
)
glyph_replace = [x % dict(self.glyph_defaults) for x in (
r'\1%(txt_apostrophe)s\2', # apostrophe's
r'\1%(txt_apostrophe)s\2', # back in '88
r'\1%(txt_quote_single_close)s', # single closing
r'%(txt_quote_single_open)s', # single opening
r'\1%(txt_quote_double_close)s', # double closing
r'%(txt_quote_double_open)s', # double opening
r'<acronym title="\2">\1</acronym>', # 3+ uppercase acronym
r'<span class="caps">\1</span>', # 3+ uppercase
r'\1%(txt_ellipsis)s', # ellipsis
r'\1%(txt_emdash)s\2', # em dash
r' %(txt_endash)s ', # en dash
r'\1\2%(txt_dimension)s\3', # dimension sign
r'%(txt_trademark)s', # trademark
r'%(txt_registered)s', # registered
r'%(txt_copyright)s', # copyright
)]
result = []
for line in re.compile(r'(<.*?>)', re.U).split(text):
if not re.search(r'<.*>', line):
for s, r in zip(glyph_search, glyph_replace):
line = s.sub(r, line)
result.append(line)
return ''.join(result)
def iAlign(self, input):
d = {'<':'left', '=':'center', '>':'right'}
return d.get(input, '')
def vAlign(self, input):
d = {'^':'top', '-':'middle', '~':'bottom'}
return d.get(input, '')
def hAlign(self, input):
d = {'<':'left', '=':'center', '>':'right', '<>': 'justify'}
return d.get(input, '')
def getRefs(self, text):
"""
what is this for?
"""
pattern = re.compile(r'(?:(?<=^)|(?<=\s))\[(.+)\]((?:http:\/\/|\/)\S+)(?=\s|$)', re.U)
text = pattern.sub(self.refs, text)
return text
def refs(self, match):
flag, url = match.groups()
self.urlrefs[flag] = url
return ''
def checkRefs(self, url):
return self.urlrefs.get(url, url)
def relURL(self, url):
o = urlparse(url)
if (not o.scheme or o.scheme == 'http') and not o.netloc and re.search(r'^\w', o.path):
url = self.hu + url
if self.restricted and o.scheme and o.scheme not in self.url_schemes:
return '#'
return url
def shelve(self, text):
id = str(uuid.uuid4())
self.shelf[id] = text
return id
def retrieve(self, text):
"""
>>> t = Textile()
>>> id = t.shelve("foobar")
>>> t.retrieve(id)
'foobar'
"""
while True:
old = text
for k,v in self.shelf.items():
text = text.replace(k,v)
if text == old: break
return text
def encode_html(self, text, quotes=True):
a = (
('&', '&#38;'),
('<', '&#60;'),
('>', '&#62;')
)
if quotes:
a = a + (
("'", '&#39;'),
('"', '&#34;')
)
for k,v in a:
text = text.replace(k,v)
return text
def graf(self, text):
if not self.lite:
text = self.noTextile(text)
text = self.code(text)
text = self.links(text)
if not self.noimage:
text = self.image(text)
if not self.lite:
text = self.lists(text)
text = self.table(text)
text = self.span(text)
text = self.footnoteRef(text)
text = self.glyphs(text)
return text.rstrip('\n')
def links(self, text):
"""
>>> t = Textile()
>>> t.links('fooobar "Google":http://google.com/foobar/ and hello world "flickr":http://flickr.com/photos/jsamsa/ ') # doctest: +ELLIPSIS
'fooobar ... and hello world ...'
"""
punct = '!"#$%&\'*+,-./:;=?@\\^_`|~'
pattern = r'''
([\s\[{(]|[%s])? # $pre
" # start
(%s) # $atts
([^"]+?) # $text
\s?
(?:\(([^)]+?)\)(?="))? # $title
":
(\S+?) # $url
(\/)? # $slash
([^\w\/;]*?) # $post
(?=<|\s|$)
''' % (re.escape(punct), self.c)
text = re.compile(pattern, re.X).sub(self.fLink, text)
return text
def fLink(self, match):
pre, atts, text, title, url, slash, post = match.groups()
# print "## ", zip("pre, atts, text, title, url, slash, post".split(","), match.groups())
# print
if pre == None:
pre = ''
url = self.checkRefs(url)
atts = self.pba(atts)
if title: atts = atts + ' title="%s"' % self.encode_html(title)
if not self.noimage:
text = self.image(text)
text = self.span(text)
text = self.glyphs(text)
url = self.relURL(url)
if slash: url = url + slash
out = '<a href="%s"%s%s>%s</a>' % (self.encode_html(url), atts, self.rel, text)
out = self.shelve(out)
return ''.join([pre, out, post])
def span(self, text):
"""
>>> t = Textile()
>>> t.span(r"hello %(bob)span *strong* and **bold**% goodbye")
'hello <span class="bob">span <strong>strong</strong> and <b>bold</b></span> goodbye'
"""
qtags = (r'\*\*', r'\*', r'\?\?', r'\-', r'__', r'_', r'%', r'\+', r'~', r'\^')
pnct = ".,\"'?!;:"
for qtag in qtags:
pattern = re.compile(r"""
(?:^|(?<=[\s>%(pnct)s])|([\]}]))
(%(qtag)s)(?!%(qtag)s)
(%(c)s)
(?::(\S+))?
([^\s%(qtag)s]+|\S[^%(qtag)s\n]*[^\s%(qtag)s\n])
([%(pnct)s]*)
%(qtag)s
(?:$|([\]}])|(?=%(selfpnct)s{1,2}|\s))
""" % {'qtag':qtag,'c':self.c,'pnct':pnct,'selfpnct':self.pnct}, re.X)
text = pattern.sub(self.fSpan, text)
return text
def fSpan(self, match):
_, tag, atts, cite, content, end, _ = match.groups()
qtags = {
'*': 'strong',
'**': 'b',
'??': 'cite',
'_' : 'em',
'__': 'i',
'-' : 'del',
'%' : 'span',
'+' : 'ins',
'~' : 'sub',
'^' : 'sup'
}
tag = qtags[tag]
atts = self.pba(atts)
if cite:
atts = atts + ' cite="%s"' % cite
out = "<%s%s>%s%s</%s>" % (tag, atts, content, end, tag)
return out
def image(self, text):
"""
>>> t = Textile()
>>> t.image('!/imgs/myphoto.jpg!:http://jsamsa.com')
'<a href="http://jsamsa.com"><img src="/imgs/myphoto.jpg" alt="" /></a>'
"""
pattern = re.compile(r"""
(?:[\[{])? # pre
\! # opening !
(\<|\=|\>)?? # optional alignment atts
(%s) # optional style,class atts
(?:\. )? # optional dot-space
([^\s(!]+) # presume this is the src
\s? # optional space
(?:\(([^\)]+)\))? # optional title
\! # closing
(?::(\S+))? # optional href
(?:[\]}]|(?=\s|$)) # lookahead: space or end of string
""" % self.c, re.U|re.X)
return pattern.sub(self.fImage, text)
def fImage(self, match):
# (None, '', '/imgs/myphoto.jpg', None, None)
algn, atts, url, title, href = match.groups()
atts = self.pba(atts)
if algn:
atts = atts + ' align="%s"' % self.iAlign(algn)
if title:
atts = atts + ' title="%s" alt="%s"' % (title, title)
else:
atts = atts + ' alt=""'
# TODO how to do this in python?
# size = @getimagesize(url)
# if (size) atts .= " size[3]"
if href:
href = self.checkRefs(href)
url = self.checkRefs(url)
url = self.relURL(url)
out = []
if href: out.append('<a href="%s">' % href)
out.append('<img src="%s"%s />' % (url, atts))
if href: out.append('</a>')
return ''.join(out)
def code(self, text):
text = self.doSpecial(text, '<code>', '</code>', self.fCode)
text = self.doSpecial(text, '@', '@', self.fCode)
text = self.doSpecial(text, '<pre>', '</pre>', self.fPre)
return text
def fCode(self, match):
before, text, after = match.groups()
if after == None: after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, self.shelve('<code>%s</code>' % text), after])
def fPre(self, match):
before, text, after = match.groups()
if after == None: after = ''
# text needs to be escaped
if not self.restricted:
text = self.encode_html(text)
return ''.join([before, '<pre>', self.shelve(text), '</pre>', after])
def doSpecial(self, text, start, end, method=None):
if method == None:
method = self.fSpecial
pattern = re.compile(r'(^|\s|[\[({>])%s(.*?)%s(\s|$|[\])}])?' % (re.escape(start), re.escape(end)), re.M|re.S)
return pattern.sub(method, text)
def fSpecial(self, match):
"""
special blocks like notextile or code
"""
before, text, after = match.groups()
if after == None: after = ''
return ''.join([before, self.shelve(self.encode_html(text)), after])
def noTextile(self, text):
text = self.doSpecial(text, '<notextile>', '</notextile>', self.fTextile)
return self.doSpecial(text, '==', '==', self.fTextile)
def fTextile(self, match):
before, notextile, after = match.groups()
if after == None: after = ''
return ''.join([before, self.shelve(notextile), after])
def textile(text, **args):
"""
this function takes additional parameters:
encoding - input encoding (default: 'utf-8')
output - output encoding (default: 'utf-8')
validate - perform mxTidy or uTidyLib validation (default: False)
sanitize - sanitize output good for weblog comments (default: False)
head_offset - ignored
"""
return Textile().textile(text, **args)
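# A minimal usage sketch (assuming this module is imported as `textile`;
# output follows the doctest on Textile.textile above):
#
#   import textile
#   html = textile.textile('h1. Hello *world*', sanitize=True)
#   # -> '\t<h1>Hello <strong>world</strong></h1>'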
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
import sys
if len(sys.argv) == 2:
f = open(sys.argv[1])
text = ''.join(f.readlines())
print Textile().textile(text)
else:
_test()
| Python |
#!/usr/bin/env python
import textile
import unittest
import re
"""
('>>> import textile')
'<p>>>> import textile</p>'
"""
class KnownValues(unittest.TestCase):
known_values = (
('hello, world', '\t<p>hello, world</p>'),
('A single paragraph.\n\nFollowed by another.','\t<p>A single paragraph.</p>\n\n\t<p>Followed by another.</p>'),
('I am <b>very</b> serious.\n\n<pre>\nI am <b>very</b> serious.\n</pre>',
'\t<p>I am <b>very</b> serious.</p>\n\n<pre>\nI am <b>very</b> serious.\n</pre>'),
('I spoke.\nAnd none replied.', '\t<p>I spoke.<br />And none replied.</p>'),
('"Observe!"', '\t<p>“Observe!”</p>'),
('Observe -- very nice!', '\t<p>Observe — very nice!</p>'),
('Observe - tiny and brief.', '\t<p>Observe – tiny and brief.</p>'),
('Observe...', '\t<p>Observe…</p>'),
('Observe ...', '\t<p>Observe …</p>'),
('Observe: 2 x 2.', '\t<p>Observe: 2 × 2.</p>'),
('one(TM), two(R), three(C).', '\t<p>one™, two®, three©.</p>'),
('h1. Header 1', '\t<h1>Header 1</h1>'),
('h2. Header 2', '\t<h2>Header 2</h2>'),
('h3. Header 3', '\t<h3>Header 3</h3>'),
('An old text\n\nbq. A block quotation.\n\nAny old text''',
'\t<p>An old text</p>\n\n\t<blockquote>\n\t\t<p>A block quotation.</p>\n\t</blockquote>\n\n\t<p>Any old text</p>'),
('I _believe_ every word.', '\t<p>I <em>believe</em> every word.</p>'),
('And then? She *fell*!', '\t<p>And then? She <strong>fell</strong>!</p>'),
('I __know__.\nI **really** __know__.', '\t<p>I <i>know</i>.<br />I <b>really</b> <i>know</i>.</p>'),
("??Cat's Cradle?? by Vonnegut", '\t<p><cite>Cat’s Cradle</cite> by Vonnegut</p>'),
('Convert with @str(foo)@', '\t<p>Convert with <code>str(foo)</code></p>'),
('I\'m -sure- not sure.', '\t<p>I’m <del>sure</del> not sure.</p>'),
('You are a +pleasant+ child.', '\t<p>You are a <ins>pleasant</ins> child.</p>'),
('a ^2^ + b ^2^ = c ^2^', '\t<p>a <sup>2</sup> + b <sup>2</sup> = c <sup>2</sup></p>'),
('log ~2~ x', '\t<p>log <sub>2</sub> x</p>'),
('I\'m %unaware% of most soft drinks.', '\t<p>I’m <span>unaware</span> of most soft drinks.</p>'),
("I'm %{color:red}unaware%\nof most soft drinks.", '\t<p>I’m <span style="color:red;">unaware</span><br />of most soft drinks.</p>'),
('p(example1). An example', '\t<p class="example1">An example</p>'),
('p(#big-red). Red here', '\t<p id="big-red">Red here</p>'),
('p(example1#big-red2). Red here', '\t<p class="example1" id="big-red2">Red here</p>'),
('p{color:blue;margin:30px}. Spacey blue', '\t<p style="color:blue;margin:30px;">Spacey blue</p>'),
('p[fr]. rouge', '\t<p lang="fr">rouge</p>'),
('I seriously *{color:red}blushed*\nwhen I _(big)sprouted_ that\ncorn stalk from my\n%[es]cabeza%.',
'\t<p>I seriously <strong style="color:red;">blushed</strong><br />when I <em class="big">sprouted</em>'
' that<br />corn stalk from my<br /><span lang="es">cabeza</span>.</p>'),
('p<. align left', '\t<p style="text-align:left;">align left</p>'),
('p>. align right', '\t<p style="text-align:right;">align right</p>'),
('p=. centered', '\t<p style="text-align:center;">centered</p>'),
('p<>. justified', '\t<p style="text-align:justify;">justified</p>'),
('p(. left ident 1em', '\t<p style="padding-left:1em;">left ident 1em</p>'),
('p((. left ident 2em', '\t<p style="padding-left:2em;">left ident 2em</p>'),
('p))). right ident 3em', '\t<p style="padding-right:3em;">right ident 3em</p>'),
('h2()>. Bingo.', '\t<h2 style="padding-left:1em;padding-right:1em;text-align:right;">Bingo.</h2>'),
('h3()>[no]{color:red}. Bingo', '\t<h3 style="color:red;padding-left:1em;padding-right:1em;text-align:right;" lang="no">Bingo</h3>'),
('<pre>\n<code>\na.gsub!( /</, "" )\n</code>\n</pre>',
'<pre>\n<code>\na.gsub!( /&#60;/, "" )\n</code>\n</pre>'),
('<div style="float:right;">\n\nh3. Sidebar\n\n"Hobix":http://hobix.com/\n"Ruby":http://ruby-lang.org/\n\n</div>\n\n'
'The main text of the\npage goes here and will\nstay to the left of the\nsidebar.',
'\t<p><div style="float:right;"></p>\n\n\t<h3>Sidebar</h3>\n\n\t<p><a href="http://hobix.com/">Hobix</a><br />'
'<a href="http://ruby-lang.org/">Ruby</a></p>\n\n\t<p></div></p>\n\n\t<p>The main text of the<br />'
'page goes here and will<br />stay to the left of the<br />sidebar.</p>'),
('# A first item\n# A second item\n# A third',
'\t<ol>\n\t\t<li>A first item</li>\n\t\t<li>A second item</li>\n\t\t<li>A third</li>\n\t</ol>'),
('# Fuel could be:\n## Coal\n## Gasoline\n## Electricity\n# Humans need only:\n## Water\n## Protein',
'\t<ol>\n\t\t<li>Fuel could be:\n\t<ol>\n\t\t<li>Coal</li>\n\t\t<li>Gasoline</li>\n\t\t<li>Electricity</li>\n\t</ol></li>\n\t\t'
'<li>Humans need only:\n\t<ol>\n\t\t<li>Water</li>\n\t\t<li>Protein</li>\n\t</ol></li>\n\t</ol>'),
('* A first item\n* A second item\n* A third',
'\t<ul>\n\t\t<li>A first item</li>\n\t\t<li>A second item</li>\n\t\t<li>A third</li>\n\t</ul>'),
('* Fuel could be:\n** Coal\n** Gasoline\n** Electricity\n* Humans need only:\n** Water\n** Protein',
'\t<ul>\n\t\t<li>Fuel could be:\n\t<ul>\n\t\t<li>Coal</li>\n\t\t<li>Gasoline</li>\n\t\t<li>Electricity</li>\n\t</ul></li>\n\t\t'
'<li>Humans need only:\n\t<ul>\n\t\t<li>Water</li>\n\t\t<li>Protein</li>\n\t</ul></li>\n\t</ul>'),
('I searched "Google":http://google.com.', '\t<p>I searched <a href="http://google.com">Google</a>.</p>'),
('I am crazy about "Hobix":hobix\nand "it\'s":hobix "all":hobix I ever\n"link to":hobix!\n\n[hobix]http://hobix.com',
'\t<p>I am crazy about <a href="http://hobix.com">Hobix</a><br />and <a href="http://hobix.com">it’s</a> '
'<a href="http://hobix.com">all</a> I ever<br /><a href="http://hobix.com">link to</a>!</p>\n\n'),
('!http://hobix.com/sample.jpg!', '\t<p><img src="http://hobix.com/sample.jpg" alt="" /></p>'),
('!openwindow1.gif(Bunny.)!', '\t<p><img src="openwindow1.gif" title="Bunny." alt="Bunny." /></p>'),
('!openwindow1.gif!:http://hobix.com/', '\t<p><a href="http://hobix.com/"><img src="openwindow1.gif" alt="" /></a></p>'),
('!>obake.gif!\n\nAnd others sat all round the small\nmachine and paid it to sing to them.',
'\t<p><img src="obake.gif" style="text-align:right;" alt="" /></p>\n\n\t'
'<p>And others sat all round the small<br />machine and paid it to sing to them.</p>'),
('We use CSS(Cascading Style Sheets).', '\t<p>We use <acronym title="Cascading Style Sheets">CSS</acronym>.</p>'),
('|one|two|three|\n|a|b|c|',
'\t<table>\n\t\t<tr>\n\t\t\t<td>one</td>\n\t\t\t<td>two</td>\n\t\t\t<td>three</td>\n\t\t</tr>'
'\n\t\t<tr>\n\t\t\t<td>a</td>\n\t\t\t<td>b</td>\n\t\t\t<td>c</td>\n\t\t</tr>\n\t</table>'),
('| name | age | sex |\n| joan | 24 | f |\n| archie | 29 | m |\n| bella | 45 | f |',
'\t<table>\n\t\t<tr>\n\t\t\t<td> name </td>\n\t\t\t<td> age </td>\n\t\t\t<td> sex </td>\n\t\t</tr>'
'\n\t\t<tr>\n\t\t\t<td> joan </td>\n\t\t\t<td> 24 </td>\n\t\t\t<td> f </td>\n\t\t</tr>'
'\n\t\t<tr>\n\t\t\t<td> archie </td>\n\t\t\t<td> 29 </td>\n\t\t\t<td> m </td>\n\t\t</tr>'
'\n\t\t<tr>\n\t\t\t<td> bella </td>\n\t\t\t<td> 45 </td>\n\t\t\t<td> f </td>\n\t\t</tr>\n\t</table>'),
('|_. name |_. age |_. sex |\n| joan | 24 | f |\n| archie | 29 | m |\n| bella | 45 | f |',
'\t<table>\n\t\t<tr>\n\t\t\t<th>name </th>\n\t\t\t<th>age </th>\n\t\t\t<th>sex </th>\n\t\t</tr>'
'\n\t\t<tr>\n\t\t\t<td> joan </td>\n\t\t\t<td> 24 </td>\n\t\t\t<td> f </td>\n\t\t</tr>'
'\n\t\t<tr>\n\t\t\t<td> archie </td>\n\t\t\t<td> 29 </td>\n\t\t\t<td> m </td>\n\t\t</tr>'
'\n\t\t<tr>\n\t\t\t<td> bella </td>\n\t\t\t<td> 45 </td>\n\t\t\t<td> f </td>\n\t\t</tr>\n\t</table>'),
# ('<script>alert("hello");</script>', ''),
('pre.. Hello\n\nHello Again\n\np. normal text', '<pre>Hello\n\nHello Again\n</pre>\n\n\t<p>normal text</p>'),
('<pre>this is in a pre tag</pre>', '<pre>this is in a pre tag</pre>'),
('"test1":http://foo.com/bar--baz\n\n"test2":http://foo.com/bar---baz\n\n"test3":http://foo.com/bar-17-18-baz',
'\t<p><a href="http://foo.com/bar--baz">test1</a></p>\n\n\t'
'<p><a href="http://foo.com/bar---baz">test2</a></p>\n\n\t'
'<p><a href="http://foo.com/bar-17-18-baz">test3</a></p>'),
# ('"foo ==(bar)==":#foobar', '\t<p><a href="#foobar">foo (bar)</a></p>'),
('!http://render.mathim.com/A%5EtAx%20%3D%20A%5Et%28Ax%29.!',
'\t<p><img src="http://render.mathim.com/A%5EtAx%20%3D%20A%5Et%28Ax%29." alt="" /></p>'),
('* Point one\n* Point two\n## Step 1\n## Step 2\n## Step 3\n* Point three\n** Sub point 1\n** Sub point 2',
'\t<ul>\n\t\t<li>Point one</li>\n\t\t<li>Point two\n\t<ol>\n\t\t<li>Step 1</li>\n\t\t<li>Step 2</li>\n\t\t'
'<li>Step 3</li>\n\t</ol></li>\n\t\t<li>Point three\n\t<ul>\n\t\t<li>Sub point 1</li>\n\t\t'
'<li>Sub point 2</li>\n\t</ul></li>\n\t</ul>'),
('@array[4] = 8@', '\t<p><code>array[4] = 8</code></p>'),
('#{color:blue} one\n# two\n# three',
'\t<ol style="color:blue;">\n\t\t<li>one</li>\n\t\t<li>two</li>\n\t\t<li>three</li>\n\t</ol>'),
('Links (like "this":http://foo.com), are now mangled in 2.1.0, whereas 2.0 parsed them correctly.',
'\t<p>Links (like <a href="http://foo.com">this</a>), are now mangled in 2.1.0, whereas 2.0 parsed them correctly.</p>'),
('@monospaced text@, followed by text',
'\t<p><code>monospaced text</code>, followed by text</p>'),
)
def testKnownValues(self):
for t, h in self.known_values:
self.assertEqual(textile.textile(t), h)
def testFootnoteReference(self):
html = textile.textile('This is covered elsewhere[1].')
self.assertTrue(re.search('^\t<p>This is covered elsewhere<sup class="footnote"><a href="#fn[a-z0-9-]+">1</a></sup>.</p>$', html))
html = textile.textile('YACC[1]')
self.assertTrue(re.search('^\t<p>YACC<sup class="footnote"><a href="#fn[a-z0-9-]+">1</a></sup></p>', html))
def testFootnote(self):
html = textile.textile('fn1. Down here, in fact.')
self.assertTrue(re.search('^\t<p id="fn[a-z0-9-]+" class="footnote"><sup>1</sup>Down here, in fact.</p>$', html))
def testURLWithHyphens(self):
self.assertEqual(textile.textile('"foo":http://google.com/one--two'), '\t<p><a href="http://google.com/one--two">foo</a></p>')
def testUnicode(self):
self.assertEqual(textile.textile(u'hello\u4500world'), '\t<p>hello\xe4\x94\x80world</p>')
self.assertEqual(textile.textile(u'\u4500', encoding='utf8', output='utf8'), u'\t<p>\u4500</p>'.encode('utf8'))
if __name__ == "__main__":
unittest.main()
| Python |
#!/usr/bin/env python
#coding: utf-8
#
# scdd.py daemon process
#
# author: observer
# email: jingchaohu@gmail.com
# blog: http://obmem.com
# last edit @ 2009.12.19
import os,sys,time
import re
from daemon import Daemon
import sqlite3
import fetchvc
from download import httpfetch
from Queue import Queue
from threading import Thread
class MyDaemon(Daemon):
def __init__(self,path,pid):
self.path = path
self.q = Queue()
Daemon.__init__(self,pid)
def thread_fetch(self):
conn = sqlite3.connect(self.path+'/verycd.sqlite3.db')
conn.text_factory = str
while True:
topic = self.q.get()
try:
fetchvc.fetch(topic,conn)
except:
pass
self.q.task_done()
def run(self):
for i in range(8):
t = Thread(target=self.thread_fetch)
t.setDaemon(True)
t.start()
conn = sqlite3.connect(self.path+'/verycd.sqlite3.db')
conn.text_factory = str
while True:
try:
#check searchqueue every 10 secs
taskqueue = open(self.path+'/searchqueue','r').readlines()
print taskqueue,time.mktime(time.gmtime()),time.mktime(time.gmtime())%900
open(self.path+'/searchqueue','w').write('')
for task in taskqueue:
url = 'http://www.verycd.com/search/folders/'+task
print 'fetching', url, '...'
res = httpfetch(url)
print '...fetching completed'
topics = re.compile(r'/topics/(\d+)',re.DOTALL).findall(res)
topics = set(topics)
for topic in topics:
self.q.put(topic)
if taskqueue == []:
time.sleep(10)
# read feed roughly every 800 secs (the modulo below uses 800)
if time.mktime(time.gmtime())%800<10:
url = 'http://www.verycd.com/sto/feed'
print 'fetching feed ...'
feeds = httpfetch(url)
topics = re.compile(r'/topics/(\d+)',re.DOTALL).findall(feeds)
topics = set(topics)
print topics
now = time.mktime(time.gmtime())
for topic in topics:
self.q.put(topic)
# read hot everyday at gmt 19:00
timeofday = time.mktime(time.gmtime())%86400
if timeofday>68400 and timeofday < 68410:
url = 'http://www.verycd.com/'
print 'fetching homepage ...'
home = httpfetch(url)
hotzone = re.compile(r'热门资源.*?</dl>',re.DOTALL).search(home).group()
hot = re.compile(r'<a href="/topics/(\d+)/"[^>]*>(《.*?》)[^<]*</a>',re.DOTALL).findall(hotzone)
html = '<h2 style="color:red">每日热门资源</h2>\n'
for topic in hot:
print 'fetching hot topic',topic[0],'...'
self.q.put(topic[0])
html += ' <a target="_parent" href="/?id=%s">%s</a> \n' % topic
open(self.path+'/static/hot.html','w').write(html)
# update the first 19 list pages at gmt 19:10
if timeofday>69000 and timeofday < 69010:
urlbase = 'http://www.verycd.com/sto/~all/page'
for i in range(1,20):
print 'fetching list',i,'...'
url = urlbase+str(i)
res = httpfetch(url)
res2 = re.compile(r'"topic-list"(.*?)"pnav"',re.DOTALL).findall(res)
if res2:
res2 = res2[0]
else:
continue
topics = re.compile(r'/topics/(\d+)',re.DOTALL).findall(res2)
topics = set(topics)
print topics
for topic in topics:
self.q.put(topic)
except:
time.sleep(10)
continue
if __name__ == "__main__":
path = os.path.dirname(os.path.realpath(sys.argv[0]))
daemon = MyDaemon(path=path,pid='/tmp/simplevc.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
daemon.start()
elif 'stop' == sys.argv[1]:
daemon.stop()
elif 'restart' == sys.argv[1]:
daemon.restart()
elif 'run' == sys.argv[1]:
daemon.run()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart" % sys.argv[0]
sys.exit(2)
| Python |
#!/usr/bin/env python
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# fetchvc.py fetch resources from verycd
#
# author: observer
# email: jingchaohu@gmail.com
# blog: http://obmem.com
# last edit @ 2009.12.16
import urllib
import re
import sqlite3
import time
import os,sys
from threading import Thread
from Queue import Queue
from download import httpfetch
path = os.path.dirname(os.path.realpath(sys.argv[0]))
conn = sqlite3.connect(path+'/verycd.sqlite3.db')
conn.text_factory = str
q = Queue()
MAXC = 8
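# Each worker opens its own sqlite3 connection below: by default a sqlite3
# connection may only be used from the thread that created it, so the
# module-level `conn` cannot be shared with the worker threads.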
def thread_fetch():
conn = sqlite3.connect(path+'/verycd.sqlite3.db')
conn.text_factory = str
while True:
topic = q.get()
fetch(topic,conn)
q.task_done()
def search(keyword,full=True):
'''search verycd, fetch search results'''
url = 'http://www.verycd.com/search/folders/'+keyword
print 'fetching search results ...'
res = httpfetch(url)
topics = re.compile(r'/topics/(\d+)',re.DOTALL).findall(res)
topics = set(topics)
links = []
if full:
links = re.compile(r'/search/folders/(.*?\?start=\d+)',re.DOTALL).findall(res)
print links
print topics
if topics:
for topic in topics:
q.put(topic)
if full and links:
for key in links:
search(key,full=False)
def hot():
''' read verycd hot resources and keep them updated every day '''
url = 'http://www.verycd.com/'
print 'fetching homepage ...'
home = httpfetch(url)
hotzone = re.compile(r'热门资源.*?</dl>',re.DOTALL).search(home).group()
hot = re.compile(r'<a href="/topics/(\d+)/"[^>]*>(《.*?》)[^<]*</a>',re.DOTALL).findall(hotzone)
html = '<h2 style="color:red">每日热门资源</h2>\n'
for topic in hot:
print 'fetching hot topic',topic[0],'...'
q.put(topic[0])
html += ' <a target="_parent" href="/?id=%s">%s</a> \n' % topic
open(path+'/static/hot.html','w').write(html)
def feed():
''' read verycd feed; run every 30 min to keep up-to-date '''
url = 'http://www.verycd.com/sto/feed'
print 'fetching feed ...'
feeds = httpfetch(url)
ids = re.compile(r'/topics/(\d+)',re.DOTALL).findall(feeds)
ids = set(ids)
print ids
now = time.mktime(time.gmtime())
for id in ids:
q.put(id)
#updtime = fetch(id)
#updtime = time.mktime(time.strptime(updtime,'%Y/%m/%d %H:%M:%S'))-8*3600 #gmt+8->gmt
#diff = now - updtime
#print '%10s secs since update' % (diff)
#if diff > 1900: # only need recent 30min updates
# break
def update(num=10):
urlbase = 'http://www.verycd.com/sto/~all/page'
for i in range(1,num+1):
print 'fetching list',i,'...'
url = urlbase+str(i)
res = httpfetch(url)
res2 = re.compile(r'"topic-list"(.*?)"pnav"',re.DOTALL).findall(res)
if res2:
res2 = res2[0]
else:
continue
topics = re.compile(r'/topics/(\d+)',re.DOTALL).findall(res2)
topics = set(topics)
print topics
for topic in topics:
q.put(topic)
def fetchall(ran='1-max',debug=False):
urlbase = 'http://www.verycd.com/archives/'
if ran == '1-max':
m1 = 1
res = urllib.urlopen(urlbase).read()
m2 = int(re.compile(r'archives/(\d+)').search(res).group(1))
else:
m = ran.split('-')
m1 = int(m[0])
m2 = int(m[1])
print 'fetching list from',m1,'to',m2,'...'
for i in range(m1,m2+1):
url = urlbase + '%05d'%i + '.html'
print 'fetching from',url,'...'
res = httpfetch(url)
ids = re.compile(r'topics/(\d+)/',re.DOTALL).findall(res)
print ids
for id in ids:
q.put(id)
def fetch(id,conn=conn,debug=False):
print 'fetching topic',id,'...'
urlbase = 'http://www.verycd.com/topics/'
url = urlbase + str(id)
res = ''
for _ in range(3):
try:
res = httpfetch(url,report=True)
break
except:
continue
abstract = re.compile(r'<h1>.*?visit',re.DOTALL).findall(res)
if not abstract:
print res
if res == '' or '很抱歉' in res:
print 'resource does not exist'
return
else:
print 'fetching',id,'again...'
return fetch(id,conn)
abstract = abstract[0]
title = re.compile(r'<h1>(.*?)</h1>',re.DOTALL).findall(abstract)[0]
status = re.compile(r'"requestWords">(.*?)<',re.DOTALL).search(abstract).group(1)
brief = re.compile(r'"font-weight:normal"><span>(.*?)</td>',re.DOTALL).search(abstract).group(1)
brief = re.compile(r'<.*?>',re.DOTALL).sub('',brief).strip()
pubtime = re.compile(r'"date-time">(.*?)</span>.*?"date-time">(.*?)</span>',re.DOTALL).findall(abstract)[0]
category1 = re.compile(r'分类.*?<td>(.*?) (.*?) ',re.DOTALL).findall(abstract)[0]
category = ['','']
category[0] = re.compile(r'<.*?>',re.DOTALL).sub('',category1[0]).strip()
category[1] = re.compile(r'<.*?>',re.DOTALL).sub('',category1[1]).strip()
res2 = re.compile(r'iptcomED2K"><!--eMule.*?<!--eMule end-->',re.DOTALL).findall(res)[0]
ed2k = re.compile(r'ed2k="([^"]*)" subtitle_[^=]*="([^"]*)">([^<]*)</a>',re.DOTALL).findall(res2)
ed2k.extend( re.compile(r'ed2k="([^"]*)">([^<]*)</a>',re.DOTALL).findall(res2) )
content = re.compile(r'<!--eMule end-->(.*?)<!--Wrap-tail end-->',re.DOTALL).findall(res)
if content:
content = content[0]
content = re.compile(r'<br />',re.DOTALL).sub('\n',content)
content = re.compile(r'<.*?>',re.DOTALL).sub('',content)
content = re.compile(r'&.*?;',re.DOTALL).sub(' ',content)
content = re.compile(r'\n\s+',re.DOTALL).sub('\n',content)
content = content.strip()
else:
content=''
if debug:
print title
print status
print brief
print pubtime[0],pubtime[1]
print category[0],category[1]
for x in ed2k:
print x
print content
ed2kstr = ''
for x in ed2k:
ed2kstr += '`'.join(x)+'`'
if not dbfind(id,conn):
dbinsert(id,title,status,brief,pubtime,category,ed2kstr,content,conn)
else:
dbupdate(id,title,status,brief,pubtime,category,ed2kstr,content,conn)
return pubtime[1]
def dbcreate():
c = conn.cursor()
c.execute('''create table verycd(
verycdid integer primary key,
title text,
status text,
brief text,
pubtime text,
updtime text,
category1 text,
category2 text,
ed2k text,
content text
)''')
conn.commit()
c.close()
def dbinsert(id,title,status,brief,pubtime,category,ed2k,content,conn):
c = conn.cursor()
c.execute('insert into verycd values(?,?,?,?,?,?,?,?,?,?)',\
(id,title,status,brief,pubtime[0],pubtime[1],category[0],category[1],\
ed2k,content))
conn.commit()
c.close()
def dbupdate(id,title,status,brief,pubtime,category,ed2k,content,conn):
c = conn.cursor()
c.execute('update verycd set verycdid=?,title=?,status=?,brief=?,pubtime=?,\
updtime=?,category1=?,category2=?,ed2k=?,content=? where verycdid=?',\
(id,title,status,brief,pubtime[0],pubtime[1],category[0],category[1],\
ed2k,content,id))
conn.commit()
c.close()
def dbfind(id,conn):
c = conn.cursor()
c.execute('select 1 from verycd where verycdid=?',(id,))
# fetch the result before closing the cursor; the old code closed first
# and then only ever inspected the first row
found = c.fetchone() is not None
c.close()
return found
def dblist():
c = conn.cursor()
c.execute('select * from verycd')
for x in c:
for y in x:
print y
def usage():
print '''Usage:
python fetchvc.py createdb
python fetchvc.py fetchall
python fetchvc.py fetch 1-1611 #fetch archive list
python fetchvc.py fetch 5633~5684 #fetch topics
python fetchvc.py fetch 5633 #fetch a topic
python fetchvc.py fetch q=keyword
python fetchvc.py list #list the database
python fetchvc.py feed #run every 30 min to keep up-to-date
python fetchvc.py hot
python fetchvc.py update #update first 20 pages, run on a daily basis'''
if __name__=='__main__':
#initialize thread pool
for i in range(MAXC):
t = Thread(target=thread_fetch)
t.setDaemon(True)
t.start()
if len(sys.argv) == 1:
usage()
elif len(sys.argv) == 2:
if sys.argv[1] == 'createdb':
dbcreate()
elif sys.argv[1] == 'fetchall':
fetchall()
elif sys.argv[1] == 'update':
update(20)
elif sys.argv[1] == 'update1':
update(1)
elif sys.argv[1] == 'feed':
feed()
elif sys.argv[1] == 'hot':
hot()
elif sys.argv[1] == 'list':
dblist()
elif len(sys.argv) == 3:
if sys.argv[1] != 'fetch':
usage()
elif '-' in sys.argv[2]:
fetchall(sys.argv[2])
elif '~' in sys.argv[2]:
m = sys.argv[2].split('~')
for i in range(int(m[0]),int(m[1])+1):
q.put(i)
elif sys.argv[2].startswith("q="):
search(sys.argv[2][2:])
else:
fetch(int(sys.argv[2]),debug=True)
# wait all threads done
q.join()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# download.py: download with report
#
# author: observer
# email: jingchaohu@gmail.com
# blog: http://obmem.com
# last edit @ 2009.12.16
import os,sys
import urllib2
from urllib import ContentTooShortError
from time import time,sleep
path = os.path.dirname(os.path.realpath(sys.argv[0]))
#proxies = {'http':'http://proxyaddress:port'}
#proxy_support = urllib2.ProxyHandler(proxies)
#opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler)
#urllib2.install_opener(opener)
#functions
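# report() is a progress callback in the spirit of urllib's reporthook
# (blocknum, blocksize, totalsize), with a fourth argument t: seconds
# elapsed since the previous block, used to estimate per-block speed.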
def report(blocknum, bs, size, t):
if t == 0:
t = 1
if size == -1:
print '%10s' % (str(blocknum*bs)) + ' downloaded | Speed =' + '%5.2f' % (bs/t/1024) + 'KB/s'
else:
percent = int(blocknum*bs*100/size)
print '%10s' % (str(blocknum*bs)) + '/' + str(size) + ' downloaded | ' + str(percent) + '% Speed =' + '%5.2f'%(bs/t/1024) + 'KB/s'
def httpfetch(url, headers={}, reporthook=report, postData=None, report=True):
ok = False
for _ in range(10):
try:
reqObj = urllib2.Request(url, postData, headers)
fp = urllib2.urlopen(reqObj)
headers = fp.info()
ok = True
break
except:
sleep(1)
continue
if not ok:
open(path+'/errors','a').write(url+'\n')
return ''
rawdata = ''
bs = 1024*8
size = -1
read = 0
blocknum = 0
if reporthook and report:
if "content-length" in headers:
size = int(headers["Content-Length"])
reporthook(blocknum, bs, size, 1)
t0 = time()
while 1:
block = ''
try:
block = fp.read(bs)
except:
open(path+'/errors','a').write(url+'\n')
return ''
if block == "":
break
rawdata += block
read += len(block)
blocknum += 1
if reporthook and report:
reporthook(blocknum, bs, size, time()-t0)
t0 = time()
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError("retrieval incomplete: got only %i out "
"of %i bytes" % (read, size), result)
return rawdata
if __name__ == '__main__':
url = 'http://www.verycd.com'
#test it
data = httpfetch(url)
open('down','w').write(data)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# code.py: based on web.py
#
# author: observer
# email: jingchaohu@gmail.com
# blog: http://obmem.com
# last edit @ 2009.12.16
import web
import sqlite3
web.config.debug = False
db = web.database(dbn='sqlite', db='verycd.sqlite3.db')
urls = (
'/', 'index',
)
render = web.template.render('templates/')
app = web.application(urls, globals())
class index:
def GET(self):
i = web.input(id=None,page='1',q=None,download=None)
if i.id:
myvar = dict(id=i.id)
rec = db.select('verycd',vars=myvar,where="verycdid=$id")
for r in rec:
fl = None
if i.download:
links = r['ed2k'].split('`')
links = [ x for x in links if 'ed2k:' in x ]
fl = '<br>\n'.join(links)
return render.id([r,fl,str(r['verycdid'])])
return render.error(404)
else:
if not i.q:
vc = db.select('verycd',order='updtime DESC',limit=20,offset=20*(int(i.page)-1))
num = db.select('verycd',what="count(*) as count")[0].count
arg = '/?page'
else:
qs = i.q.split(' ')
qs = [ 'title like \'%'+x+'%\'' for x in qs ]
where = ' and '.join(qs)
vc = db.select('verycd',order='updtime DESC',limit=20,\
offset=20*(int(i.page)-1),where=where)
num = db.select('verycd',what="count(*) as count",where=where)[0].count
arg = '/?q='+i.q+'&page'
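# Build the pager: [prev, next, last] plus a window of up to 9 page
# numbers (the current page and up to 8 neighbours), shifted when the
# window hits either end of the page range.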
prev = int(i.page)-1 == 0 and '1' or str(int(i.page)-1)
next = int(i.page)+1 <= (num-1)/20+1 and str(int(i.page)+1) or i.page
end = str((num-1)/20+1)
pages = [prev,next,end]
left = min(4,int(i.page)-1)
right = min(4,int(end)-int(i.page))
if left < 4:
right = min(8-left,int(end)-int(i.page))
if right < 4:
left = min(8-right,int(i.page)-1)
while left > 0:
pages.append(str(int(i.page)-left))
left -= 1
j = 0
while j <= right:
pages.append(str(int(i.page)+j))
j += 1
return render.index([vc,pages,arg,i.q,num])
if __name__ == "__main__":
web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)
app.run()
| Python |
# -*- coding: utf-8 -*-
import sys, subprocess as proc
from tkinter import Tk, messagebox
SUCCESS = 0
ERROR = 1
def main():
if sys.version_info.major < 3:
error_box('Python too old. Precommit script needs at least Python v3.x.\n'
'You have v%s' % (sys.version))
return ERROR
try:
hgbranch = proc.check_output(['hg', 'branch'], universal_newlines=True)
hgbranch = hgbranch.strip()
proc.check_call(['hg', 'incoming', '-b', hgbranch])
except proc.CalledProcessError as err:
if err.returncode == 1:
# no incoming changesets found, so this is in fact a success ;)
return SUCCESS
else:
error_box('An error occurred while checking for incoming changesets. Commit aborted.\n'
'Details:\n%s'%(str(err)))
return err.returncode
if yesno_box('You did not pull before trying to commit but there are\n'
'new changesets waiting on the server.\n\n'
'Commit anyway?'):
return SUCCESS
else:
return ERROR
def error_box(msg):
Tk().withdraw()
messagebox.showerror('Error', msg)
def yesno_box(msg):
Tk().withdraw()
return messagebox.askyesno('Warning', msg)
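# Wired up from a repository's .hg/hgrc (path hypothetical); a non-zero
# exit status from a precommit hook aborts the commit:
#
#   [hooks]
#   precommit = python /path/to/precommit.py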
if __name__ == "__main__":
sys.exit(main())
| Python |