code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registration for data sources."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import logging
from models import jobs
from models.data_sources import base_types
class _Registry(object):
    """Tracks all registered data source classes.

    Maintains the full list of registered data source classes, plus an
    index by name (for classes exposing a get_name() classmethod) so
    that data source names are guaranteed unique.
    """

    _data_source_classes = []
    _data_source_names = {}

    @classmethod
    def register(cls, clazz):
        """Registers a data source class.

        Args:
            clazz: a class ultimately derived from
                base_types._DataSource.

        Raises:
            ValueError: if clazz does not derive from
                base_types._DataSource, if clazz fails its own
                verify_on_registration() check, or if its name is
                already registered to another class.
        """
        # Package private: pylint: disable=protected-access
        if not issubclass(clazz, base_types._DataSource):
            raise ValueError(
                'All registered data sources must ultimately inherit '
                'from models.data_source.data_types._DataSource; '
                '"%s" does not.' % clazz.__name__)
        # BUGFIX: verify before recording anything.  Previously the name
        # was added to _data_source_names before verify_on_registration()
        # ran; a failed verification then left a stale name entry which
        # unregister() could never remove (the class never made it into
        # _data_source_classes), permanently blocking that name.
        clazz.verify_on_registration()
        if hasattr(clazz, 'get_name'):
            if clazz.get_name() in cls._data_source_names:
                raise ValueError(
                    'Cannot register class with name "%s"; class %s has '
                    'already registered that name.' % (
                        clazz.get_name(),
                        cls._data_source_names[clazz.get_name()]))
            cls._data_source_names[clazz.get_name()] = clazz
        cls._data_source_classes.append(clazz)

    @classmethod
    def unregister(cls, clazz):
        """Removes a previously-registered data source class.

        Logs (but does not raise) if the class, or its name, is not
        currently registered.
        """
        if clazz in cls._data_source_classes:
            cls._data_source_classes.remove(clazz)
            if hasattr(clazz, 'get_name'):
                try:
                    del cls._data_source_names[clazz.get_name()]
                except KeyError:
                    logging.critical(
                        'Trying to unregister name "%s" for source class %s, '
                        'but this name was not registered when the class was.',
                        clazz.get_name(), clazz.__name__)
        else:
            logging.error('Trying to unregister data source class %s, '
                          'but this class is not currently registered',
                          clazz.__name__)

    @classmethod
    def get_rest_data_source_classes(cls):
        """Returns the subset of registered classes serving REST data."""
        return [c for c in cls._data_source_classes
                # Package private: pylint: disable=protected-access
                if issubclass(c, base_types._AbstractRestDataSource)]

    @classmethod
    def is_registered(cls, clazz):
        """Tells whether clazz is currently registered."""
        return clazz in cls._data_source_classes

    @classmethod
    def get_generator_classes(cls):
        """Returns the set of DurableJobBase generators any source needs."""
        ret = set()
        for c in cls._data_source_classes:
            for g in c.required_generators():
                if issubclass(g, jobs.DurableJobBase):
                    ret.add(g)
        return ret
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module providing base types for analytics."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import re
class _DataSource(object):
"""Common base class for all kinds of data sources."""
@staticmethod
def required_generators():
"""Tell what long-running jobs (if any) are required for this source.
Return an array of classes. The empty array is allowed if
your data source does not depend on any data sources. All
data sources named must derive from DurableJobBase. When the
framework calls to the display content generator function, the
jobs will be provided singly as parameters. E.g., if you
return [FooGenerator, BarGenerator] here, your fill_values
method should be declared:
@staticmethod
def fill_values(app_context, template_values, foo_generator_job,
bar_generator_job)
foo_results = transforms.loads(foo_generator_job.load().output)
template_values['foo_widgets'] = foo_results['widgets']
template_values['foo_frammis'] = foo_results['frammis']
... and similarly for bar_generator_job. ...
Returns:
Array of types derived from DurableJobBase. May be empty list.
"""
return []
@classmethod
def verify_on_registration(cls):
"""Override to perform sanity-checking at registration time."""
pass
class _SynchronousQuery(_DataSource):
    """Marker/interface class for synchronous data sources.

    "Synchronous" means that when the dashboard display is built, HTML
    is generated directly from a template plus parameters -- as opposed
    to asynchronously fetching data up to the page (via JavaScript)
    after the page has loaded.

    A data source may inherit from both SynchronousQuery and
    _AbstractRestDataSource.  In that case, fill_values() is called
    synchronously, and fetch_values asynchronously when the page's
    JavaScript makes a JSON request.
    """

    @staticmethod
    def fill_values(app_context, template_values, required_generator_job_1,
                    required_generator_job_2, required_generator_job_N):
        """Sets key/value strings used for HTML template expansion.

        Args:
            app_context: the context taking the request; can be used to
                identify the namespace for the request.
            template_values: dict to be filled in by fill_values and then
                handed to the template interpreter.  All sources used by a
                single analytic contribute to the same template_values
                dict; be careful to avoid name collisions.
            required_generator_job_1: if your class overrides
                _DataSource.required_generators() to name generator
                classes, the Job objects corresponding to those generators
                are passed here (in the same order as declared).  Extract
                the job results and place them in template_values.
            required_generator_job_2: as required_generator_job_1.
            required_generator_job_N: as required_generator_job_1.

        Returns:
            Return value is ignored.
        """
        raise NotImplementedError(
            'Classes which synchronously provide parameters for '
            'expansion into their HTML templates must implement the '
            'fill_values method.')
class _AbstractRestDataSource(_DataSource):
    """Provide paginated data supplied to clients via a REST API.

    This data source will be served from a REST-style URL.  The
    canonical use of this data is to provide raw input to JavaScript
    charting/graphing libraries on the dashboard's analytics page.
    However, the source is available for any authorized user to make
    use of however he sees fit.

    It is OK for a data source to inherit both from SynchronousQuery
    and from _AbstractRestDataSource.  In this situation, fill_values()
    will be called synchronously, and fetch_values asynchronously when
    the JavaScript on the page makes a JSON request.
    """

    # This limit is based on manual experiments with CourseBuilder and
    # simulated randomized student data.  10,000 is plenty fast,
    # and 400,000 items causes a several-second delay in repainting of
    # graphs.  Picking 10,000 for safety against larger object sizes.
    RECOMMENDED_MAX_DATA_ITEMS = 10000

    # Maintain registry of sources by name so that we can guarantee that
    # names will be unique.
    #
    # NOTE(review): nothing visible in this module ever inserts into this
    # dict, so the duplicate-name check in verify_on_registration() below
    # appears to be dead code; name uniqueness looks like it is actually
    # enforced by the central registry's own name map.  Confirm before
    # relying on this dict being populated.
    _rest_data_sources_by_name = {}

    @classmethod
    def get_name(cls):
        """Returns the globally-unique short name; must be overridden."""
        # BUGFIX: added the missing space in '(This name' -- adjacent
        # string literals previously concatenated to '(Thisname'.
        raise NotImplementedError(
            'Classes derived from _AbstractRestDataSource must provide a name '
            'by which they are known. This name must be composed only '
            'of lowercase alphabetics, numerics and underscores. (This '
            'name will, among other uses, be employed to create '
            'JavaScript identifiers.) Also, this name must be globally '
            'unique within a CourseBuilder installation.')

    @classmethod
    def get_title(cls):
        """Returns a human-readable display title; must be overridden."""
        raise NotImplementedError(
            'Classes derived from _AbstractRestDataSource must provide a '
            'title string for display on web pages. This is used in the '
            'context of controls to select a particular page.')

    @classmethod
    def exportable(cls):
        # By default sources are not exportable; subclasses may override.
        return False

    @classmethod
    def get_default_chunk_size(cls):
        """Tell what the recommended number of items per page is.

        This will vary based on the sizes of the items returned.  Note
        that this is not an absolute maximum; the UI may request more
        than this value (up to the absolute maximum imposed by App
        Engine overall response size limits).

        This value can be set to zero to indicate that the resource does
        not support or require paging.  This is useful for, e.g.,
        course-level items (units, assessments) of which we expect to
        have never more than tens to hundreds.

        Returns:
            Recommended maximum items per page of data source items.
        """
        return cls.RECOMMENDED_MAX_DATA_ITEMS

    @classmethod
    def get_context_class(cls):
        """Returns the _AbstractContextManager subtype for this source."""
        raise NotImplementedError(
            'Classes derived from _AbstractRestDataSource must provide a class '
            'inherited from _AbstractContextManager. This class should handle '
            'building and maintaining a context used for storing '
            'parameters and optimizations for fetching values. If '
            'no context is needed, the NullContextManager class may '
            'be returned from this function.')

    @classmethod
    def get_schema(cls, app_context, log, source_context):
        """Returns a JSON schema dict describing this source's contents."""
        raise NotImplementedError(
            'Classes derived from _AbstractRestDataSource must be able to '
            'statically produce a JSON schema describing their typical '
            'contents. This function must return a dict as produced by '
            'FieldRegistry.get_json_schema_dict().')

    @classmethod
    def fetch_values(cls, app_context, source_context, schema, log,
                     page_number, foo_job):
        """Provide data to be returned from this source.

        This function should return a plain Python array of dicts.  (The
        point here is that the data must not require any postprocessing
        before it is converted to a JSON string.)

        Args:
            app_context: The application context for the current request;
                useful for namespacing any datastore queries you may need
                to perform.
            source_context: A context instance/object as produced from a
                _AbstractContextManager.build_from_web_request() or
                build_from_dict() call.  This class specifies the exact
                sub-type of _AbstractContextManager that should be used
                with it, so the specific type of context object can be
                relied upon.
            schema: A schema, as returned from the get_schema() method.
                It is possible that the schema may need to be modified as
                the result of a get_data operation -- e.g., to include
                fields that are present in the actual data even though
                not mentioned in the formal type definition, and so this
                field is provided to the fetch_values() operation,
                just-in-case.
            log: A Log instance; use this to remark on any problems or
                progress during processing.
            page_number: The number of the page of data items desired.
            foo_job: One parameter for each of the job classes returned
                by required_generators() (if any), in that same order.
                These are passed as separate parameters for convenience
                of naming in your code.
        """
        raise NotImplementedError(
            'Data sources which provide asynchronous feeds must '
            'implement the fetch_values() method.')

    @classmethod
    def verify_on_registration(cls):
        """Validates the source's name format and uniqueness.

        Raises:
            ValueError: if get_name() is not composed solely of lowercase
                letters, digits and underscores, or if the name is
                already claimed in _rest_data_sources_by_name.
        """
        source_name = cls.get_name()
        if not re.match('^[_0-9a-z]+$', source_name):
            raise ValueError(
                'REST data source name "%s" ' % source_name +
                'must contain only lowercase letters, '
                'numbers or underscore characters')
        other_class = cls._rest_data_sources_by_name.get(source_name, None)
        if other_class:
            # BUGFIX: the original concatenation omitted the space before
            # 'with the same name', producing e.g. 'pkg.Foowith the same
            # name.'  The message text is otherwise unchanged.
            raise ValueError(
                'Error: the name "%s" is already registered to the class '
                '%s.%s; you cannot register %s.%s with the same name.' % (
                    source_name,
                    other_class.__module__, other_class.__name__,
                    cls.__module__, cls.__name__))
class _AbstractContextManager(object):
"""Interface for managing contexts used by _AbstractRestDataSource types.
When a REST request is made, a context is returned along with the data and
other items. Subsequent REST requests should provide the context object.
This permits the data fetching class to retain state across operations.
Generally, a _AbstractContextManager type will be quite specific to the
type of _AbstractRestDataSource. However, the responsibilities of context
management are quite specific and thus these are separated into a distinct
interface.
Note that all the methods in this class are specifed as @classmethod.
This is intentional: It permits this class to return either instances
of itself, another type, or a simple dict.
"""
@classmethod
def build_from_web_request(cls, params, default_chunk_size):
"""Build a context instance given a set of URL parameters."""
raise NotImplementedError(
'Subclasses of _AbstractContextManager must implement a function '
'to read URL parameters specific to a context/data-source '
'and convert that into a context object.'
''
'NOTE: If there are _no_ parameters, this method should '
'return None. This allows us callers to pass only the '
'source_context parameter and not have to re-specify '
'query parameters on each request. (If this function returns '
'a default context, it will likely mismatch with the previous '
'version, and the old version will be discarded, losing '
'an opportunity for optimizing queries.')
@classmethod
def save_to_dict(cls, context):
"""Convert a context into a simple Python dict."""
raise NotImplementedError(
'Subclasses of _AbstractContextManager must provide a method to '
'convert a context into a simple dict to permit serialization of '
'the context so that it may be encrypted and returned along with '
'the rest of the REST response.')
@classmethod
def build_from_dict(cls, prev_dict):
"""Build a context from a dict previously returned by save_to_dict()."""
raise NotImplementedError(
'When a REST call returns, the save_to_dict() method is called to '
'convert the context object into a simple Python dict to permit '
'serialization. This is then serialized, encrypted, and '
'returned to the caller. On subsequent calls, the caller '
'provides the returned context parameter. This is decrypted '
'and reified into a dict. This method should convert '
'that dict back into a context.')
@classmethod
def build_blank_default(cls, params, default_chunk_size):
"""Build a default version of the context."""
raise NotImplementedError(
'When build_from_web_request() returns None, this function is used '
'to build a default version of a context.')
@classmethod
def get_public_params_for_display(cls, context):
"""Provide a human-readable version of the context."""
raise NotImplementedError(
'Subclasses of _AbstractContextManager must provide a method to '
'render a context\'s main features as a simple Python type '
'(usually a dict of name/value pairs). This is returned in the '
'body of REST responses so that humans can manually inspect and '
'verify operation during development.')
@classmethod
def equivalent(cls, context_one, context_two):
"""Tell whether two contexts are equivalent."""
raise NotImplementedError(
'Subclasses of _AbstractContextManager must provide a method to '
'tell whether two contexts are equivalent. REST requests may '
'contain a previously-saved context as well as HTML parameters. '
'If the context built from one does not match the context built '
'from the other, the old context must be discarded. Note that '
'not all fields need to match for contexts to be equivalent; '
'only the fields that define the data return need to be '
'identical. Any saved state used for optimization need not be '
'(and will probably not be) present in the HTML parameters.')
class _NullContextManager(_AbstractContextManager):
    """An _AbstractContextManager used when a real context is not required."""

    @classmethod
    def _placeholder(cls):
        # Build a fresh marker dict on every call so that callers which
        # mutate their copy cannot affect anyone else's.
        return dict(null_context='null_context')

    @classmethod
    def build_from_web_request(cls, params, default_chunk_size):
        return cls._placeholder()

    @classmethod
    def build_from_dict(cls, prev_dict):
        return cls._placeholder()

    @classmethod
    def save_to_dict(cls, context):
        # The "context" is already a plain dict; nothing to convert.
        return context

    @classmethod
    def build_blank_default(cls, params, default_chunk_size):
        return cls._placeholder()

    @classmethod
    def get_public_params_for_display(cls, context):
        # The placeholder dict is itself human-readable.
        return context

    @classmethod
    def equivalent(cls, context_one, context_two):
        return context_one == context_two
class _AbstractSmallRestDataSource(_AbstractRestDataSource):
    """Default methods for data source classes not requiring a context.

    This is most commonly the case when a REST data source is based on a
    resource that will always be small enough to send in a single page --
    e.g., items based on course content.  There will be at most hundreds,
    or possibly thousands of units, questions, etc., which is well within
    the recommended limit of 10,000.
    """

    @classmethod
    def get_context_class(cls):
        # With at most hundreds of course elements there is nothing to
        # paginate, so a null context suffices.
        return _NullContextManager

    @classmethod
    def get_default_chunk_size(cls):
        # Zero signals that paginated access is neither required nor
        # supported by this source.
        return 0
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic webapp2 handler for REST data sources."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from common import catch_and_log
from common import crypto
from controllers import utils
from models import roles
from models import transforms
from models.data_sources import utils as data_sources_utils
class _AbstractRestDataSourceHandler(utils.ApplicationHandler):
    """Webapp2 handler for REST data sources.

    This class must be derived from to override the get_data_source_class()
    method.  This should be done only from analytics.py's registration-time
    functions which take care of registering URLs to serve REST resources
    (which is why this class is marked private).

    The responsibilities of this class are to provide a standardized
    interface which accepts URL parameters to identify resources, and JSON
    output to feed external clients.  It is expected that a very common use
    case for clients is the visual display of information on dashboard
    pages.  This, however, should in no way preclude the use of this
    interface to gain access to paginated versions of data held within
    CourseBuilder.

    Data source types supported are defined by the
    base_types.AbstractRestDataSource and base_types.ContextManager
    interface, which this class uses to accomplish its task.

    All AbstractRestDataSource URLs have one parameter in common:

    page_number=<N>: Specify which page of data is wanted.  This is
        zero-based.  Not all AbstractRestDataSource implementations have an
        easy way to know which is the last page until its data is fetched.
        Further, the "last" page may not always be last -- over time,
        more data may accumulate in the store being accessed.
        If this value is not provided, it is assumed to be zero.
    """

    @classmethod
    def get_data_source_class(cls):
        # Derived classes tell us which AbstractRestDataSource to serve.
        raise NotImplementedError(
            '_RestDataSourceHandler is a base class; derived classes '
            'must implement the get_data_source_class() method to tell the '
            'base class the type of the DB table it is to wrap.')

    def get(self):
        """Returns a JSON response with a page of data and meta-information.

        The object contains the following fields:

        data: Data objects from the object.
        log: Entries made with a base_types.Log object.  These contain:
            timestamp: Stringified version of the GMT time of the event
            level: one of 'severe', 'warning', or 'info'
            message: A string describing the event.
        schema: A JSON schema describing the names and types of objects
            in the 'data' payload.
        params: A dictionary containing an echo of the context parameters
            passed in.  These are specific to the sub-type of REST data
            source.
        source_context: Any context that the REST data source wishes to
            retain across multiple calls to the same REST object.  It is
            not strictly required to re-send this into subsequent requests
            (as a parameter named 'source_context'), but doing so will
            provide significant performance improvements.  Note that if you
            are sending a 'source_context' parameter, it is not necessary
            to re-specify the set of parameters defining your query each
            time; these are retained in the context.  If you pass
            parameters which do not exactly match those in the
            source_context, the source_context is not used, and a new
            version with your new parameters is returned.
        """
        # Only super admins or admins of this course may read data-source
        # contents.
        if (not roles.Roles.is_super_admin() and
            not roles.Roles.is_course_admin(self.app_context)):
            self.response.set_status(403)
            self.response.write('Forbidden')
            return

        catch_and_log_ = catch_and_log.CatchAndLog()
        data_source_class = self.get_data_source_class()
        context_class = data_source_class.get_context_class()
        # Zero-based page index; absent/blank parameter means page zero.
        page_number = int(self.request.get('page_number') or '0')
        output = {}
        source_context = None
        schema = None
        jobz = None

        # Each consume_exceptions() block converts exceptions into entries
        # in the returned 'log' section rather than failing the request;
        # a failure in one stage leaves its result as None and later
        # stages are skipped via the combined check below.
        with catch_and_log_.consume_exceptions('Building parameters'):
            source_context = self._get_source_context(
                data_source_class.get_default_chunk_size(), catch_and_log_)
        with catch_and_log_.consume_exceptions('Getting data schema'):
            schema = data_source_class.get_schema(
                self.app_context, catch_and_log_, source_context)
            output['schema'] = schema
        with catch_and_log_.consume_exceptions('Loading required job output'):
            jobz = data_sources_utils.get_required_jobs(
                data_source_class, self.app_context, catch_and_log_)
        # Note: 'jobz is not None' -- an empty list of jobs is fine; None
        # means a required generator job was missing/unfinished/failed.
        if source_context and schema and jobz is not None:
            with catch_and_log_.consume_exceptions('Fetching results data'):
                data, page_number = data_source_class.fetch_values(
                    self.app_context, source_context, schema, catch_and_log_,
                    page_number, *jobz)
                output['data'] = data
                output['page_number'] = page_number
        with catch_and_log_.consume_exceptions('Encoding context'):
            output['source_context'] = self._encode_context(source_context)
            output['params'] = context_class.get_public_params_for_display(
                source_context)
        output['log'] = catch_and_log_.get()
        output['source'] = data_source_class.get_name()

        # Standard defenses for JSON served to browsers: XSSI prefix,
        # no content-type sniffing, and attachment disposition.
        self.response.headers['Content-Type'] = (
            'application/javascript; charset=utf-8')
        self.response.headers['X-Content-Type-Options'] = 'nosniff'
        self.response.headers['Content-Disposition'] = 'attachment'
        self.response.write(transforms.JSON_XSSI_PREFIX +
                            transforms.dumps(output))

    def _encode_context(self, source_context):
        """Save context as opaque string for use as arg to next call."""
        context_class = self.get_data_source_class().get_context_class()
        context_dict = context_class.save_to_dict(source_context)
        plaintext_context = transforms.dumps(context_dict)
        # Encrypted so clients cannot tamper with (or inspect) the state.
        return crypto.EncryptionManager.encrypt_to_urlsafe_ciphertext(
            plaintext_context)

    def _get_source_context(self, default_chunk_size, catch_and_log_):
        """Decide whether to use pre-built context or make a new one.

        Callers to this interface may provide source-specific parameters to
        indicate what portion of the data source they are interested in, or
        pass in a pre-built context (as returned from _encode_context,
        above) returned by a previous request, or both.

        The preference is to use the encoded context, as long as it is
        provided and it is compatible with the individual source selection
        arguments which may be present.  This is done because the context
        may contain additional information that allows more efficient
        processing.

        Args:
            default_chunk_size: Recommended maximum number of data items
                in a page from the data_source.
            catch_and_log_: An object which is used to convert exceptions
                into messages returned to our REST client, and can also be
                used for informational annotations on progress.

        Returns:
            context object common to many functions involved in generating
            a data flow's JSON result.
        """
        context_class = self.get_data_source_class().get_context_class()
        new_context = context_class.build_from_web_request(self.request,
                                                           default_chunk_size)
        existing_context = None
        with catch_and_log_.consume_exceptions('Problem decrypting context'):
            existing_context = self._get_existing_context(context_class)
        ret = None
        # Four cases: only-new, only-existing, neither, or both (in which
        # case the two must agree or the existing context is discarded).
        if new_context and not existing_context:
            catch_and_log_.info('Creating new context for given parameters')
            ret = new_context
        elif existing_context and not new_context:
            catch_and_log_.info('Continuing use of existing context')
            ret = existing_context
        elif not new_context and not existing_context:
            catch_and_log_.info('Building new default context')
            ret = context_class.build_blank_default(self.request,
                                                    default_chunk_size)
        elif not context_class.equivalent(new_context, existing_context):
            catch_and_log_.info(
                'Existing context and parameters mismatch; discarding '
                'existing and creating new context.')
            ret = new_context
        else:
            catch_and_log_.info(
                'Existing context matches parameters; using existing context')
            ret = existing_context
        return ret

    def _get_existing_context(self, context_class):
        """Obtain and decode existing context, if present."""
        context_param = self.request.get('source_context')
        if not context_param:
            return None
        plaintext_context = (
            crypto.EncryptionManager.decrypt_from_urlsafe_ciphertext(
                str(context_param)))
        dict_context = transforms.loads(plaintext_context)
        return context_class.build_from_dict(dict_context)
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions common to data sources module."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from models import jobs
DATA_SOURCE_ACCESS_XSRF_ACTION = 'data_source_access'


def generate_data_source_token(xsrf):
    """Generate an XSRF token used to access data source, and protect PII."""
    action = DATA_SOURCE_ACCESS_XSRF_ACTION
    return xsrf.create_xsrf_token(action)
def get_required_jobs(data_source_class, app_context, catch_and_log_):
    """Loads the output Jobs the given data source depends on.

    Args:
        data_source_class: the data source whose required_generators()
            declares which generator jobs must have completed.
        app_context: application context passed to each generator.
        catch_and_log_: log object; receives a 'critical' entry when a
            job is missing, unfinished, or failed.

    Returns:
        List of loaded Job objects (in declaration order), or None if
        any required job has never run, is still running, or failed.
    """
    loaded_jobs = []
    for generator_class in data_source_class.required_generators():
        job = generator_class(app_context).load()
        # Guard clauses: any problem with a required job aborts the
        # whole set -- callers treat None as "data not available".
        if not job:
            catch_and_log_.critical('Job for %s has never run.' %
                                    generator_class.__name__)
            return None
        if not job.has_finished:
            catch_and_log_.critical('Job for %s is still running.' %
                                    generator_class.__name__)
            return None
        if job.status_code == jobs.STATUS_CODE_FAILED:
            catch_and_log_.critical('Job for %s failed its last run.' %
                                    generator_class.__name__)
            return None
        loaded_jobs.append(job)
    return loaded_jobs
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Access AppEngine DB tables via AbstractRestDataSource interface."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import copy
import functools
import re
from common import crypto
from common.utils import Namespace
from models import entity_transforms
from models import transforms
from models.data_sources import base_types
from models.data_sources import utils as data_sources_utils
from google.appengine.ext import db
# Package-protected pylint: disable=protected-access
class _AbstractDbTableRestDataSource(base_types._AbstractRestDataSource):
"""Implements a paged view against a single DB table."""
@classmethod
def get_entity_class(cls):
raise NotImplementedError(
'Do not use this class directly; call paginated_table_source() '
'to build a curried version.')
@classmethod
def get_name(cls):
raise NotImplementedError(
'Do not use this class directly; call paginated_table_source() '
'to build a curried version.')
@classmethod
def get_context_class(cls):
return _DbTableContext
@classmethod
def get_schema(cls, app_context, log, source_context):
clazz = cls.get_entity_class()
if source_context.send_uncensored_pii_data:
registry = entity_transforms.get_schema_for_entity_unsafe(clazz)
else:
registry = entity_transforms.get_schema_for_entity(clazz)
return registry.get_json_schema_dict()['properties']
@classmethod
def fetch_values(cls, app_context, source_context, schema, log,
sought_page_number, *unused_jobs):
with Namespace(app_context.get_namespace_name()):
stopped_early = False
while len(source_context.cursors) < sought_page_number:
page_number = len(source_context.cursors)
query = cls._build_query(source_context, schema, page_number,
log)
rows = cls._fetch_page(source_context, query, page_number, log)
# Stop early if we notice we've hit the end of the table.
if len(rows) < source_context.chunk_size:
log.warning('Fewer pages available than requested. '
'Stopping at last page %d' % page_number)
stopped_early = True
break
if not stopped_early:
page_number = sought_page_number
query = cls._build_query(source_context, schema, page_number,
log)
rows = cls._fetch_page(source_context, query, page_number, log)
# While returning a page with _no_ items for the 'last' page
# is technically correct, it tends to have unfortunate
# consequences for dc/crossfilter/d3-based displays.
if not rows:
page_number = sought_page_number - 1
log.warning('Fewer pages available than requested. '
'Stopping at last page %d' % page_number)
query = cls._build_query(source_context, schema,
page_number, log)
rows = cls._fetch_page(source_context, query,
page_number, log)
return cls._postprocess_rows(
app_context, source_context, schema, log, page_number, rows
), page_number
@classmethod
def _postprocess_rows(cls, unused_app_context, source_context,
schema, unused_log, unused_page_number,
rows):
transform_fn = cls._build_transform_fn(source_context)
if source_context.send_uncensored_pii_data:
entities = [row.for_export_unsafe() for row in rows]
else:
entities = [row.for_export(transform_fn) for row in rows]
dicts = [transforms.entity_to_dict(entity) for entity in entities]
return [transforms.dict_to_json(d, schema) for d in dicts]
@classmethod
def _build_query(cls, source_context, schema, page_number, log):
query = cls.get_entity_class().all()
cls._add_query_filters(source_context, schema, page_number, query)
cls._add_query_orderings(source_context, schema, page_number, query)
cls._add_query_cursors(source_context, schema, page_number, query, log)
return query
FILTER_RE = re.compile('^([a-zA-Z0-9_]+)([<>=]+)(.*)$')
SUPPORTED_OPERATIONS = ['=', '<', '>', '>=', '<=']
@classmethod
def _add_query_filters(cls, source_context, schema, page_number, query):
for filter_spec in source_context.filters:
parts = cls.FILTER_RE.match(filter_spec)
if not parts:
raise ValueError(
'Filter specification "%s" ' % filter_spec +
'is not of the form: <name><op><value>')
name, op, value = parts.groups()
if op not in cls.SUPPORTED_OPERATIONS:
raise ValueError(
'Filter specification "%s" ' % filter_spec +
'uses an unsupported comparison operation "%s"' % op)
if name not in schema:
raise ValueError(
'Filter specification "%s" ' % filter_spec +
'calls for field "%s" ' % name +
'which is not in the schema for '
'type "%s"' % cls.get_entity_class().__name__)
converted_value = transforms.json_to_dict(
{name: value},
{'properties': {name: schema[name]}})[name]
query.filter('%s %s' % (name, op), converted_value)
@classmethod
def _add_query_orderings(cls, source_context, schema, page_number, query):
for ordering in source_context.orderings:
query.order(ordering)
@classmethod
def _add_query_cursors(cls, source_context, schema, page_number, query,
                       log):
    """Bound the query with previously saved page cursors, when present."""
    saved_cursors = source_context.cursors
    start_cursor = saved_cursors.get(str(page_number), None)
    end_cursor = saved_cursors.get(str(page_number + 1), None)

    def presence(cursor):
        return 'present' if cursor else 'missing'

    log.info('fetch page %d start cursor %s; end cursor %s' %
             (page_number, presence(start_cursor), presence(end_cursor)))
    query.with_cursor(start_cursor=start_cursor, end_cursor=end_cursor)
@classmethod
def _fetch_page(cls, source_context, query, page_number, log):
    """Fetch one page of results, saving an end cursor when appropriate.

    When the context already holds a cursor for the next page, the query
    is bounded by that cursor and no limit is applied; otherwise at most
    chunk_size items are fetched, and the resulting end cursor is
    remembered for the next page if the page came back full.
    """
    limit = None
    if (str(page_number + 1)) not in source_context.cursors:
        limit = source_context.chunk_size
    # Bug fix: 'limit' is legitimately None when the next page's cursor is
    # already known, and '%d' % None raises TypeError; '%s' renders both.
    log.info('fetch page %d using limit %s' % (page_number, limit))
    results = query.fetch(limit=limit, read_policy=db.EVENTUAL_CONSISTENCY)
    if (str(page_number + 1)) not in source_context.cursors:
        cursor = query.cursor()
        if cursor:
            # Only save the cursor for a full page; a short page means we
            # hit the end of the data and the cursor would be misleading.
            if len(results) >= source_context.chunk_size:
                source_context.cursors[str(page_number + 1)] = cursor
                log.info('fetch page %d saving end cursor' % page_number)
            else:
                log.info('fetch page %d is partial; not saving end cursor'
                         % page_number)
        else:
            log.info('fetch_page %d had no end cursor' % page_number)
    return results
@classmethod
def _build_transform_fn(cls, context):
    """Return a one-argument callable that obscures a PII string.

    When the context carries a pii_secret, the callable applies an
    HMAC-SHA-2-256 transform keyed by that secret; otherwise it maps
    every input to the constant string 'None'.
    """
    if not context.pii_secret:
        # This value is used in key generation in entities, and so
        # cannot be None or an empty string; the appengine DB internals
        # will complain.
        return lambda pii: 'None'
    return functools.partial(crypto.hmac_sha_2_256_transform,
                             context.pii_secret)
# Package-protected pylint: disable=protected-access
class _DbTableContext(base_types._AbstractContextManager):
    """Save/restore interface for context specific to DbTableRestDataSource.

    chunk_size=<N>: Specify the number of data items desired per page.
        If not provided, the default value is
        base_types._AbstractRestDataSource.RECOMMENDED_MAX_DATA_ITEMS.
    filters=<filter>: May be specified zero or more times.  Each
        filter must be of the form: <name><comparator><literal>
        Here, <name> is the name of a field on which to filter.
        The <comparator> is one of "=", "<", ">", "<=", ">="
        with the obvious meaning.
        Lastly, <literal> is a literal value of a type matching the
        filtered field.
    orderings=<name>: May be specified zero or more times.  This
        specifies a sort order based on a field.  The format is
        <field> or <field>.asc or <field>.desc, where <field> is
        the name of a field.  Note that if a less-than or greater-than
        filter is applied, these fields must also be ordered by before
        you specify any other order-by fields.
    """

    # Classes defining various versions of source_context used for
    # DbTableRestDataSource.
    class _TableContext1(object):

        def __init__(self, version, chunk_size, filters, orderings, cursors,
                     pii_secret, send_uncensored_pii_data=False):
            """Set up a context.

            Note: This plain-old-data class is being used in preference over
            a collections.namedtuple(), because for export to the JS on a
            page, we want to be able to "just get all the members", which is
            done using the __dict__ member.  This works fine for namedtuple
            proper, but when a namedtuple is serialized (pickled) and then
            unpickled, it appears to come out as some type that acts like a
            namedtuple w.r.t. the individual elements, but the __dict__
            member is not present.  This situation never seems to come up in
            dev environments, but it does occur in production reliably
            enough to count as a bug.  Thus we make this class by hand, the
            old fashioned way.

            Args:
                version: Always 1 to match TableContext1
                chunk_size: Goal number of items in each page.
                filters: List of strings of form <field>.<op>.<value>
                orderings: List of strings of form <field>.{asc|desc}
                cursors: Dict of opaque AppEngine DB cursor strings keyed
                    by page number (as a string).
                pii_secret: Session-specific encryption key for PII data.
                send_uncensored_pii_data: Normally False.  Set only for
                    one-off requests from the Data Pump, where the
                    administrator has checked a checkbox requesting
                    un-blacklisted data.  Note that setting this flag will
                    also almost certainly change the reported schema.
            """
            self.version = version
            self.chunk_size = chunk_size
            self.filters = filters
            self.orderings = orderings
            self.cursors = cursors
            self.pii_secret = pii_secret
            # Bug fix: this assignment used to be hard-coded to False,
            # silently discarding the constructor argument, so a context
            # restored via build_from_dict() lost the flag on round-trip.
            self.send_uncensored_pii_data = send_uncensored_pii_data

    @classmethod
    def build_from_web_request(cls, params, default_chunk_size):
        """Build a context from URL parameters; None when none are given."""
        chunk_size = params.get('chunk_size')
        filters = params.get_all('filter')
        orderings = params.get_all('ordering')
        if not chunk_size and not filters and not orderings:
            return None

        chunk_size = int(chunk_size or default_chunk_size)
        secret = cls._build_secret(params)
        return cls._TableContext1(1, chunk_size, filters, orderings, {}, secret)

    @classmethod
    def build_from_dict(cls, context_dict):
        """Reconstitute a context previously produced by save_to_dict()."""
        version = context_dict.get('version', -1)
        if version == 1:
            return cls._TableContext1(**context_dict)
        else:
            raise NotImplementedError(
                'Source context version %d is not supported.' % version)

    @classmethod
    def build_blank_default(cls, params, default_chunk_size):
        """Build a context with defaults: no filters, orderings, cursors."""
        secret = cls._build_secret(params)
        return cls._TableContext1(
            1,
            default_chunk_size,
            [],  # no filters
            [],  # no orderings
            {},  # no cursors
            secret)

    @classmethod
    def save_to_dict(cls, context):
        # The context is plain-old-data; its __dict__ holds exactly the
        # fields that need to be persisted.
        return context.__dict__

    @classmethod
    def get_public_params_for_display(cls, context):
        """Return a copy of the context fields that are safe to display."""
        ret = copy.copy(context.__dict__)
        del ret['version']
        del ret['cursors']
        del ret['pii_secret']
        del ret['send_uncensored_pii_data']
        return ret

    @classmethod
    def equivalent(cls, new_context, old_context):
        """True when both contexts describe the same logical query."""
        return (
            new_context.version == old_context.version and
            new_context.chunk_size == old_context.chunk_size and
            new_context.filters == old_context.filters and
            new_context.orderings == old_context.orderings)

    @classmethod
    def _build_secret(cls, params):
        """Derive the per-session PII transform secret from the XSRF token."""
        data_source_token = params.get('data_source_token')
        return crypto.generate_transform_secret_from_xsrf_token(
            data_source_token,
            data_sources_utils.DATA_SOURCE_ACCESS_XSRF_ACTION)
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module providing data source contents via REST interface."""
__author__ = 'Mike Gainer (mgainer@google.com)'
from models.data_sources import base_types
from models.data_sources import source_handler
from models.data_sources import paginated_table
from models.data_sources import registry
# Make these types available at models.data_sources so that client
# code does not have to know about our internal structure.
# pylint: disable=protected-access
# Each public name below aliases a package-private class; external code
# should import these rather than reaching into the submodules.
AbstractDbTableRestDataSource = paginated_table._AbstractDbTableRestDataSource
AbstractRestDataSource = base_types._AbstractRestDataSource
AbstractSmallRestDataSource = base_types._AbstractSmallRestDataSource
AbstractContextManager = base_types._AbstractContextManager
DbTableContext = paginated_table._DbTableContext
NullContextManager = base_types._NullContextManager
Registry = registry._Registry
SynchronousQuery = base_types._SynchronousQuery
# pylint: enable=protected-access
def _generate_rest_handler(rest_data_source_class):
    """Build a web handler class bound to one REST data source class."""
    # (Package protected) pylint: disable=protected-access
    class CurriedRestHandler(source_handler._AbstractRestDataSourceHandler):
        """Web handler class curried with class of rest data source."""

        @classmethod
        def get_data_source_class(cls):
            # Closure over the factory argument; this is the only point
            # of customization the generated handler needs.
            return rest_data_source_class

    return CurriedRestHandler
def get_namespaced_handlers():
    """Create URLs + handler classes customized to REST data source types.

    Other modules must register their analytics with this module before
    this module is registered.  This function produces a list of handlers
    for all REST data source URLs in all analytics.

    Returns:
        A (URL, handler) 2-tuple for each rest data source class mentioned
        in any analytic.
    """
    # Convert set into sorted list so WebApp always sees items in the same
    # order.  In theory, this shouldn't matter; in practice, the difference
    # between theory and practice may be nonzero, so doing this JIC.
    registered = sorted(Registry.get_rest_data_source_classes())
    return [('/rest/data/%s/items' % clazz.get_name(),
             _generate_rest_handler(clazz))
            for clazz in registered]
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enforces schema and verifies course files for referential integrity.
Use this script to verify referential integrity of your course definition files
before you import them into the production instance of Google AppEngine.
Here is how to use the script:
- prepare your course files
- edit the data/unit.csv file
- edit the data/lesson.csv file
- edit the assets/js/activity-*.*.js files
- edit the assets/js/assessment-*.js files
- run the script from a command line by navigating to the root
directory of the app and then typing "python tools/verify.py"
- review the report printed to the console for errors and warnings
Good luck!
"""
import csv
import json
import os
import re
from StringIO import StringIO
import sys
# Sentinel objects used as type markers inside SCHEMA below.  They are
# compared by identity-style equality during verification, so each must be
# a distinct instance.
BOOLEAN = object()
STRING = object()
FLOAT = object()
INTEGER = object()
CORRECT = object()
REGEX = object()
INTEGER_OR_INTEGER_LIST = object()

# The grammar of assessment and activity definition files.  Dicts describe
# required/allowed keys; lists describe allowed element types; plain strings
# (e.g. 'multiple choice') act as selectors that pick one alternative dict.
SCHEMA = {
    'assessment': {
        'assessmentName': STRING,
        'preamble': STRING,
        'checkAnswers': BOOLEAN,
        'questionsList': [{
            'questionHTML': STRING,
            'lesson': STRING,
            'choices': [STRING, CORRECT],

            # The fractional score for each choice in this question, if it is
            # multiple-choice. Each of these values should be between 0.0 and
            # 1.0, inclusive.
            'choiceScores': [FLOAT],

            # The weight given to the entire question.
            'weight': INTEGER,

            'multiLine': BOOLEAN,
            'correctAnswerNumeric': FLOAT,
            'correctAnswerString': STRING,
            'correctAnswerRegex': REGEX}]
    }, 'activity': [
        STRING,
        {
            'questionType': 'multiple choice',
            'questionHTML': STRING,
            'choices': [[STRING, BOOLEAN, STRING]]
        }, {
            'questionType': 'multiple choice group',
            'questionGroupHTML': STRING,
            'questionsList': [{
                'questionHTML': STRING,
                'choices': [STRING],
                'correctIndex': INTEGER_OR_INTEGER_LIST,
                'multiSelect': BOOLEAN}],
            'allCorrectMinCount': INTEGER,
            'allCorrectOutput': STRING,
            'someIncorrectOutput': STRING
        }, {
            'questionType': 'freetext',
            'questionHTML': STRING,
            'correctAnswerRegex': REGEX,
            'correctAnswerOutput': STRING,
            'incorrectAnswerOutput': STRING,
            'showAnswerOutput': STRING,
            'showAnswerPrompt': STRING,
            'outputHeight': STRING
        }]}
# Single-character unit type codes as used in the 'type' column of
# data/unit.csv.
UNIT_TYPE_UNIT = 'U'
UNIT_TYPE_LINK = 'O'
UNIT_TYPE_ASSESSMENT = 'A'
UNIT_TYPE_CUSTOM = 'X'
UNIT_TYPES = [UNIT_TYPE_UNIT, UNIT_TYPE_LINK, UNIT_TYPE_ASSESSMENT,
              UNIT_TYPE_CUSTOM]
UNIT_TYPE_NAMES = {
    UNIT_TYPE_UNIT: 'Unit',
    UNIT_TYPE_LINK: 'Link',
    UNIT_TYPE_ASSESSMENT: 'Assessment',
    UNIT_TYPE_CUSTOM: 'Custom Unit'}

# Exact header rows expected in data/unit.csv and data/lesson.csv.
UNITS_HEADER = (
    'id,type,unit_id,title,release_date,now_available')
LESSONS_HEADER = (
    'unit_id,unit_title,lesson_id,lesson_title,lesson_activity,'
    'lesson_activity_name,lesson_notes,lesson_video_id,lesson_objectives')

# Maps a CSV column name to (db attribute name, conversion callable), or to
# None when the column should be ignored entirely.
UNIT_CSV_TO_DB_CONVERTER = {
    'id': None,
    'type': ('type', unicode),
    'unit_id': ('unit_id', unicode),
    'title': ('title', unicode),
    'release_date': ('release_date', unicode),
    'now_available': ('now_available', lambda value: value == 'True')
}
LESSON_CSV_TO_DB_CONVERTER = {
    'unit_id': ('unit_id', int),

    # Field 'unit_title' is a duplicate of Unit.title. We enforce that both
    # values are the same and ignore this value altogether.
    'unit_title': None,
    'lesson_id': ('lesson_id', int),
    'lesson_title': ('title', unicode),
    'lesson_activity': ('activity', lambda value: value == 'yes'),
    'lesson_activity_name': ('activity_title', unicode),
    'lesson_video_id': ('video', unicode),
    'lesson_objectives': ('objectives', unicode),
    'lesson_notes': ('notes', unicode)
}

# Tags delimiting free-form JavaScript that the verifier must skip; see
# parse_content_marked_no_verify().
# pylint: disable=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_OPEN = '<gcb-no-verify>\s*\n'
# pylint: enable=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_CLOSE = '</gcb-no-verify>'

# Verbosity toggles for console output during verification.
OUTPUT_FINE_LOG = False
OUTPUT_DEBUG_LOG = False
class Term(object):
    """A typed token (REGEX/CORRECT/BOOLEAN sentinel) with an optional value."""

    def __init__(self, term_type, value=None):
        self.term_type = term_type
        self.value = value

    def __eq__(self, other):
        if type(other) is not Term:
            return False
        else:
            return ((self.term_type == other.term_type) and
                    (self.value == other.value))

    def __ne__(self, other):
        # Bug fix: under Python 2, defining __eq__ alone leaves '!='
        # comparing by identity, so Term(x) != Term(x) was True even when
        # the two terms compared equal.  Delegate to __eq__ explicitly.
        return not self.__eq__(other)
class SchemaException(Exception):
    """An error raised when course content does not match the schema."""

    def format_primitive_value_name(self, name):
        """Render a value for an error message; sentinels get fixed labels."""
        for sentinel, label in ((REGEX, 'REGEX(...)'),
                                (CORRECT, 'CORRECT(...)'),
                                (BOOLEAN, 'BOOLEAN')):
            if name == sentinel:
                return label
        return name

    def format_primitive_type_name(self, name):
        """Formats a name for a primitive type."""
        for sentinel, label in ((BOOLEAN, 'BOOLEAN'),
                                (REGEX, 'REGEX(...)'),
                                (CORRECT, 'CORRECT(...)')):
            if name == sentinel:
                return label
        if name == STRING or isinstance(name, basestring):
            return 'STRING'
        for sentinel, label in ((FLOAT, 'FLOAT'),
                                (INTEGER_OR_INTEGER_LIST,
                                 'INTEGER_OR_INTEGER_LIST'),
                                (INTEGER, 'INTEGER')):
            if name == sentinel:
                return label
        if isinstance(name, dict):
            return '{...}'
        if isinstance(name, list):
            return '[...]'
        return 'Unknown type name \'%s\'' % name.__class__.__name__

    def format_type_names(self, names):
        """Format one type name, or a list of them, for display."""
        if isinstance(names, list):
            return [self.format_primitive_type_name(name) for name in names]
        return self.format_primitive_type_name(names)

    def __init__(self, message, value=None, types=None, path=None):
        """Interpolate value/types into message and prepend the path."""
        prefix = ('Error at %s\n' % path) if path else ''
        if types is not None:
            if value:
                args = (self.format_primitive_value_name(value),
                        self.format_type_names(types))
            else:
                args = self.format_type_names(types)
            message = message % args
        elif value:
            message = message % self.format_primitive_value_name(value)
        super(SchemaException, self).__init__(prefix + message)
class Context(object):
    """Manages a stack of traversal contexts (the path of visited names)."""

    def __init__(self):
        self.parent = None
        self.path = ['/']

    def new(self, names):
        """Derive a child context whose path extends this one by names."""
        child = Context()
        child.parent = self
        child.path = list(self.path)
        if names:
            name_list = names if isinstance(names, list) else [names]
            # Falsy entries (None, '') are skipped rather than rendered.
            child.path.extend('/%s' % name for name in name_list if name)
        return child

    def format_path(self):
        """Return the canonical, concatenated name of this context."""
        return ''.join(self.path)
class SchemaHelper(object):
    """A class that knows how to apply the schema.

    Walks a parsed course-content structure (dicts/lists/values) and checks
    it against the SCHEMA type maps, raising SchemaException on mismatch.
    """

    def __init__(self):
        # Counts how many times each type label was visited during a run.
        self.type_stats = {}
        # NOTE(review): self.parse_log is created only inside
        # check_instances_match_schema(); calling visit_element() before
        # that entry point would raise AttributeError.

    def visit_element(self, atype, value, context, is_terminal=True):
        """Callback for each schema element being traversed."""
        if atype in self.type_stats:
            count = self.type_stats[atype]
        else:
            count = 0
        self.type_stats[atype] = count + 1
        if is_terminal:
            self.parse_log.append(' TERMINAL: %s %s = %s' % (
                atype, context.format_path(), value))
        else:
            self.parse_log.append(' NON-TERMINAL: %s %s' % (
                atype, context.format_path()))

    def extract_all_terms_to_depth(self, key, values, type_map):
        """Walks schema type map recursively to depth."""

        # Walks schema type map recursively to depth and creates a list of all
        # possible {key: value} pairs. The latter is a list of all non-terminal
        # and terminal terms allowed in the schema. The list of terms from this
        # method can be bound to an execution context for evaluating whether a
        # given instance's map complies with the schema.

        if key:
            type_map.update({key: key})

        if values == REGEX:
            # regex(...) appears in source files as a call; bind it to a
            # Term constructor so eval'd content produces Term objects.
            type_map.update({'regex': lambda x: Term(REGEX, x)})
            return

        if values == CORRECT:
            type_map.update({'correct': lambda x: Term(CORRECT, x)})
            return

        if values == BOOLEAN:
            type_map.update(
                {'true': Term(BOOLEAN, True), 'false': Term(BOOLEAN, False)})
            return

        # STRING and INTEGER contribute no named terms.  NOTE(review):
        # FLOAT and INTEGER_OR_INTEGER_LIST also fall through the dict/list
        # branches below and contribute nothing, which appears intentional.
        if values == STRING or values == INTEGER:
            return

        if isinstance(values, dict):
            for new_key, new_value in values.items():
                self.extract_all_terms_to_depth(new_key, new_value, type_map)
            return

        if isinstance(values, list):
            for new_value in values:
                self.extract_all_terms_to_depth(None, new_value, type_map)
            return

    def find_selectors(self, type_map):
        """Finds all type selectors."""

        # Finds all elements in the type map where both a key and a value are
        # strings. These elements are used to find one specific type map among
        # several alternative type maps.
        selector = {}
        for akey, avalue in type_map.items():
            if isinstance(akey, basestring) and isinstance(avalue, basestring):
                selector.update({akey: avalue})
        return selector

    def find_compatible_dict(self, value_map, type_map, unused_context):
        """Find the type map most compatible with the value map."""

        # A value map is considered compatible with a type map when former
        # contains the same key names and the value types as the type map.

        # special case when we have just one type; check name and type are the
        # same
        if len(type_map) == 1:
            for value_key in value_map.keys():
                for key in type_map[0].keys():
                    if value_key == key:
                        return key, type_map[0]
            # NOTE(review): keys()[0] relies on Python 2 dict.keys()
            # returning a list; this line is not Python 3 compatible.
            raise SchemaException(
                "Expected: '%s'\nfound: %s", type_map[0].keys()[0], value_map)

        # case when we have several types to choose from
        for adict in type_map:
            dict_selector = self.find_selectors(adict)
            for akey, avalue in dict_selector.items():
                if value_map[akey] == avalue:
                    return akey, adict
        return None, None

    def check_single_value_matches_type(self, value, atype, context):
        """Checks if a single value matches a specific (primitive) type."""
        if atype == BOOLEAN:
            # value is either a native bool or a Term produced by the
            # 'true'/'false' bindings in extract_all_terms_to_depth().
            if isinstance(value, bool) or value.term_type == BOOLEAN:
                self.visit_element('BOOLEAN', value, context)
                return True
            else:
                raise SchemaException(
                    'Expected: \'true\' or \'false\'\nfound: %s', value)
        if isinstance(atype, basestring):
            # A literal string in the schema (a selector value) matches any
            # string instance.
            if isinstance(value, basestring):
                self.visit_element('str', value, context)
                return True
            else:
                raise SchemaException('Expected: \'string\'\nfound: %s', value)
        if atype == STRING:
            if isinstance(value, basestring):
                self.visit_element('STRING', value, context)
                return True
            else:
                raise SchemaException('Expected: \'string\'\nfound: %s', value)
        if atype == REGEX and value.term_type == REGEX:
            self.visit_element('REGEX', value, context)
            return True
        if atype == CORRECT and value.term_type == CORRECT:
            self.visit_element('CORRECT', value, context)
            return True
        if atype == FLOAT:
            if is_number(value):
                self.visit_element('NUMBER', value, context)
                return True
            else:
                raise SchemaException('Expected: \'number\'\nfound: %s', value)
        if atype == INTEGER_OR_INTEGER_LIST:
            if is_integer(value):
                self.visit_element('INTEGER', value, context)
                return True
            if is_integer_list(value):
                self.visit_element('INTEGER_OR_INTEGER_LIST', value, context)
                return True
            raise SchemaException(
                'Expected: \'integer\' or '
                '\'array of integer\'\nfound: %s', value,
                path=context.format_path())
        if atype == INTEGER:
            if is_integer(value):
                self.visit_element('INTEGER', value, context)
                return True
            else:
                raise SchemaException(
                    'Expected: \'integer\'\nfound: %s', value,
                    path=context.format_path())
        raise SchemaException(
            'Unexpected value \'%s\'\n'
            'for type %s', value, atype, path=context.format_path())

    def check_value_list_matches_type(self, value, atype, context):
        """Checks if all items in value list match a specific type."""
        for value_item in value:
            found = False
            for atype_item in atype:
                # A nested list in the schema offers several alternatives
                # for this position; any one of them may match.
                if isinstance(atype_item, list):
                    for atype_item_item in atype_item:
                        if self.does_value_match_type(
                                value_item, atype_item_item, context):
                            found = True
                            break
                else:
                    if self.does_value_match_type(
                            value_item, atype_item, context):
                        found = True
                        break
            if not found:
                raise SchemaException(
                    'Expected: \'%s\'\nfound: %s', atype, value)
        return True

    def check_value_matches_type(self, value, atype, context):
        """Checks if single value or a list of values match a specific type."""
        if isinstance(atype, list) and isinstance(value, list):
            return self.check_value_list_matches_type(value, atype, context)
        else:
            return self.check_single_value_matches_type(value, atype, context)

    def does_value_match_type(self, value, atype, context):
        """Same as other method, but does not throw an exception."""
        try:
            return self.check_value_matches_type(value, atype, context)
        except SchemaException:
            return False

    def does_value_match_one_of_types(self, value, types, context):
        """Checks if a value matches to one of the types in the list."""
        type_names = None
        if isinstance(types, list):
            type_names = types
        if type_names:
            for i in range(0, len(type_names)):
                if self.does_value_match_type(value, type_names[i], context):
                    return True

        return False

    def does_value_match_map_of_type(self, value, types, context):
        """Checks if value matches any variation of {...} type."""

        # find all possible map types
        maps = []
        for atype in types:
            if isinstance(atype, dict):
                maps.append(atype)
        if not maps and isinstance(types, dict):
            maps.append(types)

        # check if the structure of value matches one of the maps
        if isinstance(value, dict):
            aname, adict = self.find_compatible_dict(value, maps, context)
            if adict:
                self.visit_element(
                    'dict', value, context.new(aname), is_terminal=False)
                for akey, avalue in value.items():
                    if akey not in adict:
                        raise SchemaException(
                            'Unknown term \'%s\'', akey,
                            path=context.format_path())
                    self.check_value_of_valid_type(
                        avalue, adict[akey], context.new([aname, akey]))
                return True
            raise SchemaException(
                'The value:\n %s\n'
                'is incompatible with expected type(s):\n %s',
                value, types, path=context.format_path())

        return False

    def format_name_with_index(self, alist, aindex):
        """A function to format a context name with an array element index."""

        # When a list is a singleton the index is omitted from the path.
        if len(alist) == 1:
            return ''
        else:
            return '[%s]' % aindex

    def does_value_match_list_of_types_in_order(
            self, value, types, context, target):
        """Iterates the value and types in given order and checks for match."""
        all_values_are_lists = True
        for avalue in value:
            if not isinstance(avalue, list):
                all_values_are_lists = False

        if all_values_are_lists:
            # Each element is itself a list; recurse with ordering enforced.
            for i in range(0, len(value)):
                self.check_value_of_valid_type(value[i], types, context.new(
                    self.format_name_with_index(value, i)), in_order=True)
        else:
            if len(target) != len(value):
                # NOTE(review): '%' binds tighter than '+' here, so
                # 'found: %s.' % value formats first and the remaining %s
                # receives len(target); the final text is still correct as
                # long as 'value' contains no stray '%' characters.
                raise SchemaException(
                    'Expected: \'%s\' values\n' + 'found: %s.' % value,
                    len(target), path=context.format_path())
            for i in range(0, len(value)):
                self.check_value_of_valid_type(value[i], target[i], context.new(
                    self.format_name_with_index(value, i)))

        return True

    def does_value_match_list_of_types_any_order(self, value, types,
                                                 context, lists):
        """Iterates the value and types, checks if they match in any order."""
        target = lists
        if not target:
            if not isinstance(types, list):
                raise SchemaException(
                    'Unsupported type %s',
                    None, types, path=context.format_path())
            target = types

        for i in range(0, len(value)):
            found = False
            for atarget in target:
                try:
                    self.check_value_of_valid_type(
                        value[i], atarget,
                        context.new(self.format_name_with_index(value, i)))
                    found = True
                    break
                except SchemaException as unused_e:
                    continue

            if not found:
                raise SchemaException(
                    'The value:\n %s\n'
                    'is incompatible with expected type(s):\n %s',
                    value, types, path=context.format_path())

        return True

    def does_value_match_list_of_type(self, value, types, context, in_order):
        """Checks if a value matches a variation of [...] type."""

        # Extra argument controls whether matching must be done in a specific
        # or in any order. A specific order is demanded by [[...]]] construct,
        # i.e. [[STRING, INTEGER, BOOLEAN]], while sub elements inside {...} and
        # [...] can be matched in any order.

        # prepare a list of list types
        lists = []
        for atype in types:
            if isinstance(atype, list):
                lists.append(atype)

        if len(lists) > 1:
            raise SchemaException(
                'Unable to validate types with multiple alternative '
                'lists %s', None, types, path=context.format_path())

        if isinstance(value, list):
            # NOTE(review): this second len(lists) > 1 check is unreachable;
            # the identical check above has already raised by this point.
            if len(lists) > 1:
                raise SchemaException(
                    'Allowed at most one list\nfound: %s.',
                    None, types, path=context.format_path())

            # determine if list is in order or not as hinted by double array
            # [[..]]; [STRING, NUMBER] is in any order, but [[STRING, NUMBER]]
            # demands order
            ordered = len(lists) == 1 and isinstance(types, list)
            if in_order or ordered:
                # NOTE(review): lists[0] raises IndexError when in_order is
                # True but no nested list type exists; presumably callers
                # never hit that combination -- verify before refactoring.
                return self.does_value_match_list_of_types_in_order(
                    value, types, context, lists[0])
            else:
                return self.does_value_match_list_of_types_any_order(
                    value, types, context, lists)
        return False

    def check_value_of_valid_type(self, value, types, context, in_order=None):
        """Check if a value matches any of the given types."""
        if not (isinstance(types, list) or isinstance(types, dict)):
            self.check_value_matches_type(value, types, context)
            return

        # Try list-shaped, then map-shaped, then single-type matching.
        if (self.does_value_match_list_of_type(value, types,
                                               context, in_order) or
            self.does_value_match_map_of_type(value, types, context) or
            self.does_value_match_one_of_types(value, types, context)):
            return

        raise SchemaException(
            'Unknown type %s', value, path=context.format_path())

    def check_instances_match_schema(self, values, types, name):
        """Recursively decompose 'values' to see if they match schema types."""

        # Entry point: initializes the parse log used by visit_element().
        self.parse_log = []
        context = Context().new(name)
        self.parse_log.append(' ROOT %s' % context.format_path())

        # pylint: disable=protected-access
        values_class = values.__class__
        # pylint: enable=protected-access

        # handle {..} containers
        if isinstance(types, dict):
            if not isinstance(values, dict):
                raise SchemaException(
                    'Error at \'/\': expected {...}, found %s' % (
                        values_class.__name__))
            self.check_value_of_valid_type(values, types, context.new([]))
            return

        # handle [...] containers
        if isinstance(types, list):
            if not isinstance(values, list):
                raise SchemaException(
                    'Error at \'/\': expected [...], found %s' % (
                        values_class.__name__))
            for i in range(0, len(values)):
                self.check_value_of_valid_type(
                    values[i], types, context.new('[%s]' % i))
            return

        raise SchemaException(
            'Expected an array or a dictionary.', None,
            path=context.format_path())
def escape_quote(value):
    """Return value as unicode with single quotes backslash-escaped."""
    text = unicode(value)
    return text.replace('\'', r'\'')
class Unit(object):
    """A class to represent a Unit."""

    def __init__(self):
        self.id = 0
        self.type = ''
        self.unit_id = ''
        self.title = ''
        self.release_date = ''
        self.now_available = False

    def list_properties(self, name, output):
        """Append one JS assignment statement per unit property."""
        output.append('%s[\'id\'] = %s;' % (name, self.id))
        # String-valued fields are single-quoted and escaped.
        for key, value in (('type', self.type),
                           ('unit_id', self.unit_id),
                           ('title', self.title),
                           ('release_date', self.release_date)):
            output.append('%s[\'%s\'] = \'%s\';' % (
                name, key, escape_quote(value)))
        output.append('%s[\'now_available\'] = %s;' % (
            name, str(self.now_available).lower()))
class Lesson(object):
    """A class to represent a Lesson."""

    def __init__(self):
        self.unit_id = 0
        self.unit_title = ''
        self.lesson_id = 0
        self.lesson_title = ''
        self.lesson_activity = ''
        self.lesson_activity_name = ''
        self.lesson_notes = ''
        self.lesson_video_id = ''
        self.lesson_objectives = ''

    def list_properties(self, name, output):
        """Append one JS assignment statement per lesson property."""
        activity = 'true' if self.lesson_activity == 'yes' else 'false'
        output.append('%s[\'unit_id\'] = %s;' % (name, self.unit_id))
        output.append('%s[\'unit_title\'] = \'%s\';' % (
            name, escape_quote(self.unit_title)))
        output.append('%s[\'lesson_id\'] = %s;' % (name, self.lesson_id))
        output.append('%s[\'lesson_title\'] = \'%s\';' % (
            name, escape_quote(self.lesson_title)))
        output.append('%s[\'lesson_activity\'] = %s;' % (name, activity))
        # Remaining string-valued fields are single-quoted and escaped.
        for key, value in (
                ('lesson_activity_name', self.lesson_activity_name),
                ('lesson_notes', self.lesson_notes),
                ('lesson_video_id', self.lesson_video_id),
                ('lesson_objectives', self.lesson_objectives)):
            output.append('%s[\'%s\'] = \'%s\';' % (
                name, key, escape_quote(value)))

    def to_id_string(self):
        """Return a stable identity string: unit_id.lesson_id.title."""
        return '%s.%s.%s' % (self.unit_id, self.lesson_id, self.lesson_title)
class Assessment(object):
    """A class to represent a Assessment."""

    def __init__(self):
        # Evaluation scope mapping term names allowed in assessment files to
        # schema sentinels/constructors, built by walking SCHEMA['assessment'].
        self.scope = {}
        SchemaHelper().extract_all_terms_to_depth(
            'assessment', SCHEMA['assessment'], self.scope)
class Activity(object):
    """A class to represent a Activity."""

    def __init__(self):
        # Evaluation scope mapping term names allowed in activity files to
        # schema sentinels/constructors, built by walking SCHEMA['activity'].
        self.scope = {}
        SchemaHelper().extract_all_terms_to_depth(
            'activity', SCHEMA['activity'], self.scope)
def silent_echo(unused_message):
    """Discard the message; drop-in quiet replacement for echo()."""
    return None
def echo(message):
    # Default progress reporter: writes to stdout (Python 2 print statement).
    print message
def is_integer_list(s):
    """Return True iff s is a list whose elements are all ints.

    Non-list inputs return False.  Note that bools satisfy
    isinstance(x, int), matching the original behavior.
    """
    # The original wrapped this in try/except ValueError, but nothing in
    # the body can raise ValueError; the dead handler is removed.
    if not isinstance(s, list):
        return False
    return all(isinstance(item, int) for item in s)
def is_integer(s):
    """Return True iff s converts to an integral numeric value."""
    try:
        # Equal int() and float() conversions mean no fractional part.
        as_float = float(s)
        return int(s) == as_float
    except Exception:  # pylint: disable=broad-except
        # Any conversion failure (ValueError, TypeError, ...) means "no".
        return False
def is_boolean(s):
    """Return True iff s is the literal string 'True' or 'False'."""
    # The original guarded with try/except ValueError, but '==' against a
    # string cannot raise ValueError; the dead handler is removed.
    return s in ('True', 'False')
def is_number(s):
    """Return True iff s converts cleanly to a float."""
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        # TypeError added: float(None) or float([]) raise TypeError, which
        # the original let escape instead of answering "not a number".
        return False
def is_one_of(value, values):
    """Return True iff value equals (==) any element of values."""
    # any() with explicit '==' preserves the original comparison semantics
    # (no identity short-circuit, unlike the 'in' operator).
    return any(value == current for current in values)
def text_to_line_numbered_text(text):
    """Prefix each line of the provided text with a 1-based line number."""
    numbered = ['%s: %s' % (index, line)
                for index, line in enumerate(text.split('\n'), start=1)]
    return '\n '.join(numbered)
def set_object_attributes(target_object, names, values, converter=None):
    """Sets object attributes from provided values.

    Args:
        target_object: the object whose attributes are assigned.
        names: list of attribute names (CSV header fields).
        values: list of raw string values, same length as names.
        converter: optional dict mapping a CSV field name to
            (attribute_name, conversion_callable), or to None to skip
            automatic conversion for that field.

    Raises:
        SchemaException: if names and values differ in length.
    """
    if len(names) != len(values):
        raise SchemaException(
            'The number of elements must match: %s and %s' % (names, values))
    for i in range(len(names)):
        if converter:
            target_def = converter.get(names[i])
            if target_def:
                target_name = target_def[0]
                target_type = target_def[1]
                setattr(target_object, target_name, target_type(values[i]))
                continue
        if is_integer(values[i]):
            # if we are setting an attribute of an object that support
            # metadata, try to infer the target type and convert 'int' into
            # 'str' here
            target_type = None
            if hasattr(target_object.__class__, names[i]):
                attribute = getattr(target_object.__class__, names[i])
                if hasattr(attribute, 'data_type'):
                    target_type = attribute.data_type.__name__
            if target_type and (target_type == 'str' or
                                target_type == 'basestring'):
                setattr(target_object, names[i], str(values[i]))
            else:
                setattr(target_object, names[i], int(values[i]))
            continue
        if is_boolean(values[i]):
            # Bug fix: the original stored bool(values[i]), but any
            # non-empty string is truthy, so the string 'False' was stored
            # as True.  Compare against the literal instead, matching the
            # 'now_available' converter defined elsewhere in this file.
            setattr(target_object, names[i], values[i] == 'True')
            continue
        setattr(target_object, names[i], values[i])
def read_objects_from_csv_stream(stream, header, new_object, converter=None):
    """Parse CSV rows from a readable stream into objects."""
    # Buffer the entire stream so the csv reader sees a seekable source.
    reader = csv.reader(StringIO(stream.read()))
    return read_objects_from_csv(
        reader, header, new_object, converter=converter)
def read_objects_from_csv_file(fname, header, new_object):
    """Read objects from the named CSV file.

    Bug fix: the original opened the file and never closed it, leaking the
    handle; a 'with' block now closes it deterministically.
    """
    with open(fname) as stream:
        return read_objects_from_csv_stream(stream, header, new_object)
def read_objects_from_csv(value_rows, header, new_object, converter=None):
    """Reads objects from the rows of a CSV file.

    Args:
        value_rows: iterable of CSV rows (lists of strings); the first
            non-empty row must exactly match the expected header.
        header: comma-separated string of expected column names.
        new_object: zero-argument factory producing one object per data row.
        converter: optional field-conversion map passed through to
            set_object_attributes().

    Returns:
        A list of populated objects, one per data row.

    Raises:
        SchemaException: on a header mismatch or a row of the wrong width.
    """
    values = []
    for row in value_rows:
        # Skip blank rows (e.g. trailing newlines in the file).
        if not row:
            continue
        values.append(row)
    names = header.split(',')

    # NOTE(review): input with no non-empty rows raises IndexError on
    # values[0] here rather than a SchemaException -- confirm callers
    # never pass an empty file.
    if names != values[0]:
        raise SchemaException(
            'Error reading CSV header.\n '
            'Header row had %s element(s): %s\n '
            'Expected header row with %s element(s): %s' % (
                len(values[0]), values[0], len(names), names))

    items = []
    for i in range(1, len(values)):
        if len(names) != len(values[i]):
            raise SchemaException(
                'Error reading CSV data row.\n '
                'Row #%s had %s element(s): %s\n '
                'Expected %s element(s): %s' % (
                    i, len(values[i]), values[i], len(names), names))

        # Decode string values in case they were encoded in UTF-8. The CSV
        # reader should do this automatically, but it does not. The issue is
        # discussed here: http://docs.python.org/2/library/csv.html
        decoded_values = []
        for value in values[i]:
            if isinstance(value, basestring):
                value = unicode(value.decode('utf-8'))
            decoded_values.append(value)

        item = new_object()
        set_object_attributes(item, names, decoded_values, converter=converter)
        items.append(item)
    return items
def escape_javascript_regex(text):
    """Rewrites JS 'correctAnswerRegex: /.../flags' literals as regex() calls."""
    # Groups: 2 = opening slash, 3 = pattern body, 4 = closing slash + flags.
    literal_pattern = r'correctAnswerRegex([:][ ]*)([/])(.*)([/][ismx]*)'
    replacement = r'correctAnswerRegex: regex("\2\3\4")'
    return re.sub(literal_pattern, replacement, text)
def remove_javascript_single_line_comment(text):
    """Strips '//' comments: trailing ones (space-preceded) and whole lines."""
    # A trailing comment must be preceded by at least one space, so URLs
    # like http://... (no space before the slashes) survive untouched.
    trailing = re.compile('^(.*?)[ ]+//(.*)$', re.MULTILINE)
    whole_line = re.compile('^//(.*)$', re.MULTILINE)
    return whole_line.sub(r'', trailing.sub(r'\1', text))
def remove_javascript_multi_line_comment(text):
    """Strips JavaScript /* ... */ comments, which may span multiple lines.

    The pattern is greedy, so a single match runs from the first '/*' to the
    last '*/' in the text — identical to the original behavior.
    """
    # Raw string makes the backslash escapes explicit, removing the need for
    # the anomalous-backslash-in-string pylint suppressions; '|' is the
    # conventional way to combine regex flags.
    return re.sub(
        re.compile(r'/\*(.*)\*/', re.MULTILINE | re.DOTALL), r'', text)
def parse_content_marked_no_verify(content):
    """Parses and returns a tuple of real content and no-verify text."""
    # Any free-form JavaScript in the activity file must be wrapped in
    # //<gcb-no-verify> ... //</gcb-no-verify> tags so that the verifier
    # can selectively ignore it.
    pattern = re.compile('%s(.*)%s' % (
        NO_VERIFY_TAG_NAME_OPEN, NO_VERIFY_TAG_NAME_CLOSE), re.DOTALL)
    match = pattern.search(content)
    noverify_text = match.group(1) if match else None
    return (pattern.sub('', content), noverify_text)
def convert_javascript_to_python(content, root_name):
    """Removes JavaScript specific syntactic constructs and returns a tuple."""
    # Split off any no-verify section, strip both comment styles, drop the
    # 'var' declaration of the root object, and rewrite JS regex literals so
    # that the remaining text can be evaluated as Python.
    content, noverify_text = parse_content_marked_no_verify(content)
    for transform in (remove_javascript_multi_line_comment,
                      remove_javascript_single_line_comment):
        content = transform(content)
    content = content.replace('var %s = ' % root_name, '%s = ' % root_name)
    return (escape_javascript_regex(content), noverify_text)
def convert_javascript_file_to_python(fname, root_name):
    """Reads a JavaScript file and converts its contents to Python source.

    Uses a context manager so the file handle is closed deterministically
    (the original left the open file to the garbage collector), and a single
    read() instead of joining readlines().
    """
    with open(fname, 'r') as js_file:
        return convert_javascript_to_python(js_file.read(), root_name)
def legacy_eval_python_expression_for_test(content, scope, unused_root_name):
    """Legacy content parsing function using compile/exec.

    Compiles and then evaluates Python script text in a restricted
    environment using the provided bindings, and returns the resulting
    bindings.  WARNING: exec of untrusted content remains exploitable even
    with __builtins__ removed; this is for tests only.
    """
    print 'WARNING! This code is unsafe and uses compile/exec!'
    # Create a new execution scope that has only the schema terms defined;
    # remove all other language constructs, including __builtins__, so the
    # evaluated content cannot (easily) call arbitrary functions.
    restricted_scope = {}
    restricted_scope.update(scope)
    restricted_scope.update({'__builtins__': {}})
    code = compile(content, '<string>', 'exec')
    exec code in restricted_scope  # pylint: disable=exec-used
    return restricted_scope
def not_implemented_parse_content(
    unused_content, unused_scope, unused_root_name):
    """Placeholder parser that always fails; a real parser must be installed."""
    raise Exception('Not implemented.')
# By default no parser method is configured; install a custom parser with
# the same (content, scope, root_name) signature if you have one.
parse_content = not_implemented_parse_content
def evaluate_python_expression_from_text(content, root_name, scope,
                                         noverify_text):
    """Compiles and evaluates a Python script in a restricted environment.

    Delegates parsing to the module-level parse_content hook, records any
    no-verify text, and requires that root_name ended up bound.
    """
    bindings = parse_content(content, scope, root_name)
    if noverify_text:
        bindings['noverify'] = noverify_text
    if bindings.get(root_name) is None:
        raise Exception('Unable to find \'%s\'' % root_name)
    return bindings
def evaluate_javascript_expression_from_file(fname, root_name, scope, error):
    """Converts a JavaScript file to Python and evaluates it.

    Args:
        fname: string. Path of the JavaScript file to evaluate.
        root_name: string. Name of the root object the file must define.
        scope: dict. Bindings visible to the evaluated content.
        error: callable taking a string; receives diagnostic messages.

    Returns:
        dict of resulting bindings.
    """
    (content, noverify_text) = convert_javascript_file_to_python(fname,
                                                                 root_name)
    try:
        return evaluate_python_expression_from_text(content, root_name, scope,
                                                    noverify_text)
    except Exception:  # narrowed from bare 'except:'; still re-raised below
        error('Unable to parse %s in file %s\n %s' % (
            root_name, fname, text_to_line_numbered_text(content)))
        for message in sys.exc_info():
            error(str(message))
        raise
class Verifier(object):
    """Verifies Units, Lessons, Assessments, Activities and their relations.

    Accumulates warning/error counts and builds self.export, a list of lines
    of JavaScript re-expressing the verified course model.
    """

    def __init__(self):
        # echo_func receives every log line; callers replace it as needed.
        self.echo_func = silent_echo
        self.schema_helper = SchemaHelper()
        self.errors = 0
        self.warnings = 0
        # Lines of exported JavaScript built up during verification.
        self.export = []

    def verify_unit_fields(self, units):
        """Checks unit fields and appends their export representation."""
        self.export.append('units = Array();')
        for unit in units:
            if not is_one_of(unit.now_available, [True, False]):
                self.error(
                    'Bad now_available \'%s\' for unit id %s; expected '
                    '\'True\' or \'False\'' % (unit.now_available, unit.id))

            if not is_one_of(unit.type, UNIT_TYPES):
                self.error(
                    'Bad type \'%s\' for unit id %s; '
                    'expected: %s.' % (unit.type, unit.id, UNIT_TYPES))

            # Only units of type 'U' carry a separate unit_id.
            if unit.type == 'U':
                if not is_integer(unit.unit_id):
                    self.error(
                        'Expected integer unit_id, found %s in unit id '
                        ' %s' % (unit.unit_id, unit.id))

            self.export.append('')
            self.export.append('units[%s] = Array();' % unit.id)
            self.export.append('units[%s][\'lessons\'] = Array();' % unit.id)
            unit.list_properties('units[%s]' % unit.id, self.export)

    def verify_lesson_fields(self, lessons):
        """Checks lesson fields and appends their export representation."""
        for lesson in lessons:
            if not is_one_of(lesson.lesson_activity, ['yes', '']):
                self.error('Bad lesson_activity \'%s\' for lesson_id %s' % (
                    lesson.lesson_activity, lesson.lesson_id))

            self.export.append('')
            self.export.append('units[%s][\'lessons\'][%s] = Array();' % (
                lesson.unit_id, lesson.lesson_id))
            lesson.list_properties('units[%s][\'lessons\'][%s]' % (
                lesson.unit_id, lesson.lesson_id), self.export)

    def verify_unit_lesson_relationships(self, units, lessons):
        """Checks each lesson points to a unit and all lessons are in use."""
        used_lessons = []
        units.sort(key=lambda x: x.id)
        for i in range(0, len(units)):
            unit = units[i]

            # check that unit ids are 1-based and sequential
            if unit.id != i + 1:
                self.error('Unit out of order: %s' % (unit.id))

            # collect the lessons belonging to this unit
            self.fine('Unit %s: %s' % (unit.id, unit.title))
            unit_lessons = []
            for lesson in lessons:
                if lesson.unit_id == unit.unit_id:
                    if lesson.unit_title != unit.title:
                        raise Exception(''.join([
                            'A unit_title of a lesson (id=%s) must match ',
                            'title of a unit (id=%s) the lesson belongs to.'
                        ]) % (lesson.lesson_id, lesson.unit_id))
                    unit_lessons.append(lesson)
                    used_lessons.append(lesson)

            # inspect all lessons for the current unit
            unit_lessons.sort(key=lambda x: x.lesson_id)
            for j in range(0, len(unit_lessons)):
                lesson = unit_lessons[j]

                # check that lesson_ids are 1-based and sequential
                if lesson.lesson_id != j + 1:
                    self.warn(
                        'Lesson lesson_id is out of order: expected %s, found '
                        ' %s (%s)' % (
                            j + 1, lesson.lesson_id, lesson.to_id_string()))

                self.fine(' Lesson %s: %s' % (
                    lesson.lesson_id, lesson.lesson_title))

        # find lessons not used by any of the units
        unused_lessons = list(lessons)
        for lesson in used_lessons:
            unused_lessons.remove(lesson)
        for lesson in unused_lessons:
            self.warn('Unused lesson_id %s (%s)' % (
                lesson.lesson_id, lesson.to_id_string()))

        # check all lessons point to known units
        for lesson in lessons:
            has = False
            for unit in units:
                if lesson.unit_id == unit.unit_id:
                    has = True
                    break
            if not has:
                self.error('Lesson has unknown unit_id %s (%s)' % (
                    lesson.unit_id, lesson.to_id_string()))

    def get_activity_as_python(self, unit_id, lesson_id):
        """Loads and verifies one activity file; returns it as Python.

        Returns None (implicitly) when the file is missing — callers should
        be prepared for that.
        """
        fname = os.path.join(
            os.path.dirname(__file__),
            '../assets/js/activity-%s.%s.js' % (unit_id, lesson_id))
        if not os.path.exists(fname):
            self.error(' Missing activity: %s' % fname)
        else:
            activity = evaluate_javascript_expression_from_file(
                fname, 'activity', Activity().scope, self.error)
            self.verify_activity_instance(activity, fname)
            return activity

    def verify_activities(self, lessons):
        """Loads and verifies all activities."""
        self.info('Loading activities:')
        count = 0
        for lesson in lessons:
            if lesson.lesson_activity == 'yes':
                count += 1
                activity = self.get_activity_as_python(
                    lesson.unit_id, lesson.lesson_id)
                self.export.append('')
                self.encode_activity_json(
                    activity, lesson.unit_id, lesson.lesson_id)
        self.info('Read %s activities' % count)

    def verify_assessment(self, units):
        """Loads and verifies all assessments."""
        self.export.append('')
        self.export.append('assessments = Array();')
        self.info('Loading assessment:')
        count = 0
        for unit in units:
            # Units of type 'A' represent assessments.
            if unit.type == 'A':
                count += 1
                assessment_name = str(unit.unit_id)
                fname = os.path.join(
                    os.path.dirname(__file__),
                    '../assets/js/assessment-%s.js' % assessment_name)
                if not os.path.exists(fname):
                    self.error(' Missing assessment: %s' % fname)
                else:
                    assessment = evaluate_javascript_expression_from_file(
                        fname, 'assessment', Assessment().scope, self.error)
                    self.verify_assessment_instance(assessment, fname)
                    self.export.append('')
                    self.encode_assessment_json(assessment, assessment_name)
        self.info('Read %s assessments' % count)

    # NB: The exported script needs to define a gcb_regex() wrapper function
    @staticmethod
    def encode_regex(regex_str):
        """Encodes a JavaScript-style regex into a Python gcb_regex call."""
        # parse the regex into the base and modifiers. e.g., for /foo/i
        # base is 'foo' and modifiers is 'i'
        assert regex_str[0] == '/'
        # find the LAST '/' in regex_str (because there might be other
        # escaped '/' characters in the middle of regex_str)
        final_slash_index = regex_str.rfind('/')
        assert final_slash_index > 0
        base = regex_str[1:final_slash_index]
        modifiers = regex_str[final_slash_index + 1:]
        func_str = 'gcb_regex(' + repr(base) + ', ' + repr(modifiers) + ')'
        return func_str

    def encode_activity_json(self, activity_dict, unit_id, lesson_id):
        """Encodes an activity dictionary into JSON."""
        output = []
        for elt in activity_dict['activity']:
            t = type(elt)
            encoded_elt = None

            if t is str:
                encoded_elt = {'type': 'string', 'value': elt}
            elif t is dict:
                qt = elt['questionType']
                encoded_elt = {'type': qt}
                if qt == 'multiple choice':
                    choices = elt['choices']
                    encoded_choices = [[x, y.value, z] for x, y, z in choices]
                    encoded_elt['choices'] = encoded_choices
                elif qt == 'multiple choice group':
                    # everything inside are primitive types that can be encoded
                    elt_copy = dict(elt)
                    del elt_copy['questionType']  # redundant
                    encoded_elt['value'] = elt_copy
                elif qt == 'freetext':
                    for k in elt.keys():
                        if k == 'questionType':
                            continue
                        elif k == 'correctAnswerRegex':
                            encoded_elt[k] = Verifier.encode_regex(elt[k].value)
                        else:
                            # ordinary string
                            encoded_elt[k] = elt[k]
                else:
                    assert False
            else:
                assert False

            assert encoded_elt
            output.append(encoded_elt)

        # N.B.: make sure to get the string quoting right!
        code_str = "units[%s]['lessons'][%s]['activity'] = " % (
            unit_id, lesson_id) + repr(json.dumps(output)) + ';'
        self.export.append(code_str)

        if 'noverify' in activity_dict:
            self.export.append('')
            noverify_code_str = "units[%s]['lessons'][%s]['code'] = " % (
                unit_id, lesson_id) + repr(activity_dict['noverify']) + ';'
            self.export.append(noverify_code_str)

    def encode_assessment_json(self, assessment_dict, assessment_name):
        """Encodes an assessment dictionary into JSON."""
        real_dict = assessment_dict['assessment']

        output = {}
        output['assessmentName'] = real_dict['assessmentName']
        if 'preamble' in real_dict:
            output['preamble'] = real_dict['preamble']
        output['checkAnswers'] = real_dict['checkAnswers'].value

        encoded_questions_list = []
        for elt in real_dict['questionsList']:
            encoded_elt = {}
            encoded_elt['questionHTML'] = elt['questionHTML']
            if 'lesson' in elt:
                encoded_elt['lesson'] = elt['lesson']
            if 'correctAnswerNumeric' in elt:
                encoded_elt['correctAnswerNumeric'] = elt[
                    'correctAnswerNumeric']
            if 'correctAnswerString' in elt:
                encoded_elt['correctAnswerString'] = elt['correctAnswerString']
            if 'correctAnswerRegex' in elt:
                encoded_elt['correctAnswerRegex'] = Verifier.encode_regex(
                    elt['correctAnswerRegex'].value)
            if 'choices' in elt:
                encoded_choices = []
                correct_answer_index = None
                for (ind, e) in enumerate(elt['choices']):
                    if type(e) is str:
                        encoded_choices.append(e)
                    elif e.term_type == CORRECT:
                        # The Term wrapper marks the correct choice.
                        encoded_choices.append(e.value)
                        correct_answer_index = ind
                    else:
                        raise Exception("Invalid type in 'choices'")
                encoded_elt['choices'] = encoded_choices
                encoded_elt['correctAnswerIndex'] = correct_answer_index
            encoded_questions_list.append(encoded_elt)
        output['questionsList'] = encoded_questions_list

        # N.B.: make sure to get the string quoting right!
        code_str = 'assessments[\'' + assessment_name + '\'] = ' + repr(
            json.dumps(output)) + ';'
        self.export.append(code_str)

        if 'noverify' in assessment_dict:
            self.export.append('')
            noverify_code_str = ('assessments[\'' + assessment_name +
                                 '\'] = ' + repr(assessment_dict['noverify']) +
                                 ';')
            self.export.append(noverify_code_str)

    def format_parse_log(self):
        """Returns the schema helper's parse log as one printable string."""
        return 'Parse log:\n%s' % '\n'.join(self.schema_helper.parse_log)

    def verify_assessment_instance(self, scope, fname):
        """Verifies compliance of assessment with schema."""
        if scope:
            try:
                self.schema_helper.check_instances_match_schema(
                    scope['assessment'], SCHEMA['assessment'], 'assessment')
                self.info(' Verified assessment %s' % fname)
                if OUTPUT_DEBUG_LOG:
                    self.info(self.format_parse_log())
            except SchemaException as e:
                self.error(' Error in assessment %s\n%s' % (
                    fname, self.format_parse_log()))
                raise e
        else:
            self.error(' Unable to evaluate \'assessment =\' in %s' % fname)

    def verify_activity_instance(self, scope, fname):
        """Verifies compliance of activity with schema."""
        if scope:
            try:
                self.schema_helper.check_instances_match_schema(
                    scope['activity'], SCHEMA['activity'], 'activity')
                self.info(' Verified activity %s' % fname)
                if OUTPUT_DEBUG_LOG:
                    self.info(self.format_parse_log())
            except SchemaException as e:
                self.error(' Error in activity %s\n%s' % (
                    fname, self.format_parse_log()))
                raise e
        else:
            self.error(' Unable to evaluate \'activity =\' in %s' % fname)

    def fine(self, x):
        # Fine-grained trace output, emitted only when enabled.
        if OUTPUT_FINE_LOG:
            self.echo_func('FINE: ' + x)

    def info(self, x):
        self.echo_func('INFO: ' + x)

    def warn(self, x):
        # Counts the warning and echoes it.
        self.warnings += 1
        self.echo_func('WARNING: ' + x)

    def error(self, x):
        # Counts the error and echoes it.
        self.errors += 1
        self.echo_func('ERROR: ' + x)

    def load_and_verify_model(self, echo_func):
        """Loads, parses and verifies all content for a course.

        Returns:
            tuple of (warning count, error count, summary string).
        """
        self.echo_func = echo_func

        self.info('Started verification in: %s' % __file__)

        unit_file = os.path.join(os.path.dirname(__file__), '../data/unit.csv')
        lesson_file = os.path.join(
            os.path.dirname(__file__), '../data/lesson.csv')

        self.info('Loading units from: %s' % unit_file)
        units = read_objects_from_csv_file(unit_file, UNITS_HEADER, Unit)
        self.info('Read %s units' % len(units))

        self.info('Loading lessons from: %s' % lesson_file)
        lessons = read_objects_from_csv_file(
            lesson_file, LESSONS_HEADER, Lesson)
        self.info('Read %s lessons' % len(lessons))

        self.verify_unit_fields(units)
        self.verify_lesson_fields(lessons)
        self.verify_unit_lesson_relationships(units, lessons)

        try:
            self.verify_activities(lessons)
            self.verify_assessment(units)
        except SchemaException as e:
            self.error(str(e))

        # NOTE(review): the two concatenated messages below have no separator
        # between them, so they run together in the output; downstream tests
        # only substring-match each half, so this is left as-is.
        info = (
            'Schema usage statistics: %s'
            'Completed verification: %s warnings, %s errors.' % (
                self.schema_helper.type_stats, self.warnings, self.errors))
        self.info(info)
        return self.warnings, self.errors, info
def run_all_regex_unit_tests():
    """Executes all tests related to regular expressions."""
    # pylint: disable=anomalous-backslash-in-string
    # JS regex literals are rewritten into regex("...") calls; escaped
    # slashes inside the literal must survive the rewrite unchanged.
    assert escape_javascript_regex(
        'correctAnswerRegex: /site:bls.gov?/i, blah') == (
        'correctAnswerRegex: regex(\"/site:bls.gov?/i\"), blah')
    assert escape_javascript_regex(
        'correctAnswerRegex: /site:http:\/\/www.google.com?q=abc/i, blah') == (
        'correctAnswerRegex: '
        'regex(\"/site:http:\/\/www.google.com?q=abc/i\"), blah')
    assert remove_javascript_multi_line_comment(
        'blah\n/*\ncomment\n*/\nblah') == 'blah\n\nblah'
    assert remove_javascript_multi_line_comment(
        'blah\nblah /*\ncomment\nblah */\nblah') == ('blah\nblah \nblah')
    # Trailing // comments require a preceding space, so URLs are preserved.
    assert remove_javascript_single_line_comment(
        'blah\n// comment\nblah') == 'blah\n\nblah'
    assert remove_javascript_single_line_comment(
        'blah\nblah http://www.foo.com\nblah') == (
        'blah\nblah http://www.foo.com\nblah')
    assert remove_javascript_single_line_comment(
        'blah\nblah // comment\nblah') == 'blah\nblah\nblah'
    assert remove_javascript_single_line_comment(
        'blah\nblah // comment http://www.foo.com\nblah') == (
        'blah\nblah\nblah')
    assert parse_content_marked_no_verify(
        'blah1\n// <gcb-no-verify>\n/blah2\n// </gcb-no-verify>\nblah3')[0] == (
        'blah1\n// \nblah3')
    # pylint: enable=anomalous-backslash-in-string

    # encode_regex splits a /base/modifiers literal at its LAST slash.
    assert Verifier.encode_regex('/white?/i') == """gcb_regex('white?', 'i')"""
    assert (Verifier.encode_regex('/jane austen (book|books) \\-price/i') ==
            r"""gcb_regex('jane austen (book|books) \\-price', 'i')""")
    assert (Verifier.encode_regex('/Kozanji|Kozan-ji|Kosanji|Kosan-ji/i') ==
            r"""gcb_regex('Kozanji|Kozan-ji|Kosanji|Kosan-ji', 'i')""")
    assert (Verifier.encode_regex('/Big Time College Sport?/i') ==
            "gcb_regex('Big Time College Sport?', 'i')")
    assert (Verifier.encode_regex('/354\\s*[+]\\s*651/') ==
            r"""gcb_regex('354\\s*[+]\\s*651', '')""")
# pylint: disable=too-many-statements
def run_all_schema_helper_unit_tests():
    """Executes all tests related to schema validation."""

    def assert_same(a, b):
        # Raise with both values shown so mismatches are easy to diagnose.
        if a != b:
            raise Exception('Expected:\n  %s\nFound:\n  %s' % (a, b))

    def assert_pass(instances, types, expected_result=None):
        # Asserts that instances validate against types; optionally checks
        # the returned parse result as well.
        try:
            schema_helper = SchemaHelper()
            result = schema_helper.check_instances_match_schema(
                instances, types, 'test')
            if OUTPUT_DEBUG_LOG:
                print '\n'.join(schema_helper.parse_log)
            if expected_result:
                assert_same(expected_result, result)
        except SchemaException as e:
            if OUTPUT_DEBUG_LOG:
                print str(e)
                print '\n'.join(schema_helper.parse_log)
            raise

    def assert_fails(func):
        # Asserts that calling func raises SchemaException.
        try:
            func()
            raise Exception('Expected to fail')
        except SchemaException as e:
            if OUTPUT_DEBUG_LOG:
                print str(e)

    def assert_fail(instances, types):
        assert_fails(lambda: assert_pass(instances, types))

    def create_python_dict_from_js_object(js_object):
        # Round-trips a JS object literal through the converter/evaluator.
        python_str, noverify = convert_javascript_to_python(
            'var x = ' + js_object, 'x')
        ret = evaluate_python_expression_from_text(
            python_str, 'x', Assessment().scope, noverify)
        return ret['x']

    # CSV tests
    units = read_objects_from_csv(
        [
            ['id', 'type', 'now_available'],
            [1, 'U', 'True'],
            [1, 'U', 'False']],
        'id,type,now_available', Unit, converter=UNIT_CSV_TO_DB_CONVERTER)
    assert units[0].now_available
    assert not units[1].now_available

    read_objects_from_csv(
        [['id', 'type'], [1, 'none']], 'id,type', Unit)

    def reader_one():
        # Header has fewer columns than expected -> must fail.
        return read_objects_from_csv(
            [['id', 'type'], [1, 'none']], 'id,type,title', Unit)
    assert_fails(reader_one)

    def reader_two():
        # Data row has fewer columns than the header -> must fail.
        read_objects_from_csv(
            [['id', 'type', 'title'], [1, 'none']], 'id,type,title', Unit)
    assert_fails(reader_two)

    # context tests
    assert_same(Context().new([]).new(['a']).new(['b', 'c']).format_path(),
                ('//a/b/c'))

    # simple map tests
    assert_pass({'name': 'Bob'}, {'name': STRING})
    assert_fail('foo', 'bar')
    assert_fail({'name': 'Bob'}, {'name': INTEGER})
    assert_fail({'name': 12345}, {'name': STRING})
    assert_fail({'amount': 12345}, {'name': INTEGER})
    assert_fail({'regex': Term(CORRECT)}, {'regex': Term(REGEX)})
    assert_pass({'name': 'Bob'}, {'name': STRING, 'phone': STRING})
    assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING})
    assert_pass({'name': 'Bob'},
                {'phone': STRING, 'name': STRING, 'age': INTEGER})

    # mixed attributes tests
    assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
    assert_pass({'colors': []}, {'colors': [STRING]})
    assert_fail({'colors': {'red': 'blue'}}, {'colors': [STRING]})
    assert_fail({'colors': {'red': 'blue'}}, {'colors': [FLOAT]})
    assert_fail({'colors': ['red', 'blue', 5.5]}, {'colors': [STRING]})
    assert_fail({'colors': ['red', 'blue', {'foo': 'bar'}]},
                {'colors': [STRING]})
    assert_fail({'colors': ['red', 'blue'], 'foo': 'bar'},
                {'colors': [STRING]})
    assert_pass({'colors': ['red', 1]}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': [1, 2, 3]}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': ['red', 1, 5.3]}, {'colors': [[STRING, INTEGER]]})
    assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
    assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING]]})
    assert_fail({'colors': ['red', ['blue']]}, {'colors': [STRING]})
    assert_fail({'colors': ['red', ['blue', 'green']]}, {'colors': [STRING]})

    # required attribute tests
    assert_pass({'colors': ['red', 5]}, {'colors': [[STRING, INTEGER]]})
    assert_fail({'colors': ['red', 5]}, {'colors': [[INTEGER, STRING]]})
    assert_pass({'colors': ['red', 5]}, {'colors': [STRING, INTEGER]})
    assert_pass({'colors': ['red', 5]}, {'colors': [INTEGER, STRING]})
    assert_fail({'colors': ['red', 5, 'FF0000']},
                {'colors': [[STRING, INTEGER]]})

    # an array and a map of primitive type tests
    assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': STRING}})
    assert_fail({'color': {'name': 'red', 'rgb': ['FF0000']}},
                {'color': {'name': STRING, 'rgb': STRING}})
    assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': INTEGER}})
    assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': {'hex': STRING}}})
    assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
                {'color': {'name': STRING, 'rgb': STRING}})
    assert_pass({'colors':
                 [{'name': 'red', 'rgb': 'FF0000'},
                  {'name': 'blue', 'rgb': '0000FF'}]},
                {'colors': [{'name': STRING, 'rgb': STRING}]})
    assert_fail({'colors':
                 [{'name': 'red', 'rgb': 'FF0000'},
                  {'phone': 'blue', 'rgb': '0000FF'}]},
                {'colors': [{'name': STRING, 'rgb': STRING}]})

    # boolean type tests
    assert_pass({'name': 'Bob', 'active': True},
                {'name': STRING, 'active': BOOLEAN})
    assert_pass({'name': 'Bob', 'active': [5, True, False]},
                {'name': STRING, 'active': [INTEGER, BOOLEAN]})
    assert_pass({'name': 'Bob', 'active': [5, True, 'false']},
                {'name': STRING, 'active': [STRING, INTEGER, BOOLEAN]})
    assert_fail({'name': 'Bob', 'active': [5, True, 'False']},
                {'name': STRING, 'active': [[INTEGER, BOOLEAN]]})

    # optional attribute tests
    assert_pass({'points':
                 [{'x': 1, 'y': 2, 'z': 3}, {'x': 3, 'y': 2, 'z': 1},
                  {'x': 2, 'y': 3, 'z': 1}]},
                {'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
    assert_pass({'points':
                 [{'x': 1, 'z': 3}, {'x': 3, 'y': 2}, {'y': 3, 'z': 1}]},
                {'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
    assert_pass({'account':
                 [{'name': 'Bob', 'age': 25, 'active': True}]},
                {'account':
                 [{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
    assert_pass({'account':
                 [{'name': 'Bob', 'active': True}]},
                {'account':
                 [{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})

    # nested array tests
    assert_fail({'name': 'Bob', 'active': [5, True, 'false']},
                {'name': STRING, 'active': [[BOOLEAN]]})
    assert_fail({'name': 'Bob', 'active': [True]},
                {'name': STRING, 'active': [[STRING]]})
    assert_pass({'name': 'Bob', 'active': ['true']},
                {'name': STRING, 'active': [[STRING]]})
    assert_pass({'name': 'flowers', 'price': ['USD', 9.99]},
                {'name': STRING, 'price': [[STRING, FLOAT]]})
    assert_pass({'name': 'flowers', 'price':
                 [['USD', 9.99], ['CAD', 11.79], ['RUB', 250.23]]},
                {'name': STRING, 'price': [[STRING, FLOAT]]})

    # selector tests
    assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
                           {'state': 'NY', 'drink': 'wine'}]},
                {'likes': [{'state': 'CA', 'food': STRING},
                           {'state': 'NY', 'drink': STRING}]})
    assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
                           {'state': 'CA', 'food': 'nuts'}]},
                {'likes': [{'state': 'CA', 'food': STRING},
                           {'state': 'NY', 'drink': STRING}]})
    assert_fail({'likes': {'state': 'CA', 'drink': 'cheese'}},
                {'likes': [{'state': 'CA', 'food': STRING},
                           {'state': 'NY', 'drink': STRING}]})

    # creating from dict tests
    assert_same(create_python_dict_from_js_object('{"active": true}'),
                {'active': Term(BOOLEAN, True)})
    assert_same(create_python_dict_from_js_object(
        '{"a": correct("hello world")}'),
        {'a': Term(CORRECT, 'hello world')})
    assert_same(create_python_dict_from_js_object(
        '{correctAnswerRegex: /hello/i}'),
        {'correctAnswerRegex': Term(REGEX, '/hello/i')})
def run_example_activity_tests():
    """Parses and validates example activity file."""
    fname = os.path.join(
        os.path.dirname(__file__), '../assets/js/activity-examples.js')
    if not os.path.exists(fname):
        # Fixed: the original passed fname as a second positional argument
        # ('Missing file: %s', fname), logging-style, so the exception
        # message never contained the actual path.
        raise Exception('Missing file: %s' % fname)
    verifier = Verifier()
    verifier.echo_func = echo
    activity = evaluate_javascript_expression_from_file(
        fname, 'activity', Activity().scope, verifier.echo_func)
    verifier.verify_activity_instance(activity, fname)
def test_exec():
    """This test shows that exec/compile are exploitable, thus not safe.

    Even with __builtins__ replaced by an empty dict, the executed code can
    climb the class hierarchy of a plain tuple back to a class whose module
    exposes the real builtins.
    """
    content = """
foo = [
    c for c in ().__class__.__base__.__subclasses__()
    if c.__name__ == 'catch_warnings'
][0]()._module.__builtins__
"""
    restricted_scope = {}
    restricted_scope.update({'__builtins__': {}})
    code = compile(content, '<string>', 'exec')
    exec code in restricted_scope  # pylint: disable=exec-used
    # The 'restricted' code successfully recovered the real builtins.
    assert 'isinstance' in restricted_scope.get('foo')
def test_sample_assets():
    """Test assets shipped with the sample course."""
    _, _, output = Verifier().load_and_verify_model(echo)
    # Pin both the exact schema usage statistics of the shipped sample
    # course and a clean (0 warnings, 0 errors) verification result.
    if (
        'Schema usage statistics: {'
        '\'REGEX\': 19, \'STRING\': 415, \'NUMBER\': 1, '
        '\'BOOLEAN\': 81, \'dict\': 73, \'str\': 41, \'INTEGER\': 9, '
        '\'CORRECT\': 9}' not in output
        or 'Completed verification: 0 warnings, 0 errors.' not in output):
        raise Exception('Sample course verification failed.\n%s' % output)
def run_all_unit_tests():
    """Runs all unit tests in this module."""
    # Temporarily install the unsafe legacy exec-based parser, since the
    # tests need actual content evaluation; always restore the original.
    global parse_content  # pylint: disable=global-statement
    original = parse_content
    try:
        parse_content = legacy_eval_python_expression_for_test
        run_all_regex_unit_tests()
        run_all_schema_helper_unit_tests()
        run_example_activity_tests()
        test_exec()
        test_sample_assets()
    finally:
        parse_content = original
if __name__ == '__main__':
    # Allow running this module's self-tests directly from the command line.
    run_all_unit_tests()
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ETL testing utilities."""
import copy
import cStringIO
import logging
import os
from controllers import sites
from tests.functional import actions
from tools.etl import etl
from tools.etl import remote
class EtlTestBase(actions.TestBase):
    # Allow access to protected members under test.
    # pylint: disable=protected-access

    def setUp(self):
        """Sets up a test course and captures etl log output in-memory."""
        super(EtlTestBase, self).setUp()
        self.test_environ = copy.deepcopy(os.environ)
        # In etl.main, use test auth scheme to avoid interactive login.
        self.test_environ['SERVER_SOFTWARE'] = remote.TEST_SERVER_SOFTWARE
        self.url_prefix = '/test'
        self.namespace = 'ns_test'
        self.raw = 'course:%s::%s' % (self.url_prefix, self.namespace)
        self.swap(os, 'environ', self.test_environ)
        sites.setup_courses(self.raw + ', course:/:/')
        # Redirect etl's logger into an in-memory stream so tests can
        # assert on its output via get_log(); originals restored in tearDown.
        self.log_stream = cStringIO.StringIO()
        self.old_log_handlers = list(etl._LOG.handlers)
        etl._LOG.handlers = [logging.StreamHandler(self.log_stream)]

    def tearDown(self):
        # Undo the course registration and logger redirection from setUp.
        sites.reset_courses()
        etl._LOG.handlers = self.old_log_handlers
        super(EtlTestBase, self).tearDown()

    def get_log(self):
        """Returns everything logged via etl._LOG since setUp."""
        self.log_stream.flush()
        return self.log_stream.getvalue()
class FakeEnvironment(object):
    """Temporary fake tools.etl.remote.Environment.

    Bypasses making a remote_api connection because webtest can't handle it and
    we don't want to bring up a local server for our functional tests. When this
    fake is used, the in-process datastore stub will handle RPCs.

    TODO(johncox): find a way to make webtest successfully emulate the
    remote_api endpoint and get rid of this fake.
    """

    def __init__(self, application_id, server, path=None):
        # Fixed typo: was self._appication_id; the attribute now matches the
        # name used by the real remote.Environment this class stands in for.
        self._application_id = application_id
        self._path = path
        self._server = server

    def establish(self):
        """No-op; the real Environment configures remote_api here."""
        pass
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Remote environment manager for extract-transform-load utilities."""
__author__ = [
'johncox@google.com',
]
import os
import sys
import appengine_config
# Override SERVER_SOFTWARE before doing any App Engine imports so import-time
# detection of dev mode, done against SERVER_SOFTWARE of 'Development*', fails.
# Once imports are done, this environment variable can be reset as needed (for
# tests, etc.).
SERVER_SOFTWARE = 'Production Emulation'
# Refuse to run at all in real production; this tool is local-only.
if appengine_config.PRODUCTION_MODE:
    sys.exit('Running etl/tools/remote.py in production is not supported.')
os.environ['SERVER_SOFTWARE'] = SERVER_SOFTWARE
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.tools import appengine_rpc
from google.appengine.tools import remote_api_shell
# String. Used to detect appspot.com servers.
_APPSPOT_SERVER_SUFFIX = 'appspot.com'
# String. Password used when a password is not necessary.
_BOGUS_PASSWORD = 'bogus_password'
# String. Infix for google.com application ids.
_GOOGLE_APPLICATION_INFIX = 'google.com'
# String. Prefix App Engine uses for application ids in the dev appserver.
_LOCAL_APPLICATION_ID_PREFIX = 'dev~'
# String. Prefix used to detect if a server is running locally.
_LOCAL_SERVER_PREFIX = 'localhost'
# String. Prefix App Engine uses for application ids in production.
_REMOTE_APPLICATION_ID_PREFIX = 's~'
# String. Email address used unless os.environ['USER_EMAIL'] is set in tests.
_TEST_EMAIL = 'test@example.com'
# String. os.environ['SERVER_SOFTWARE'] value that indicates we're running
# under the test environment.
TEST_SERVER_SOFTWARE = 'Test'
class Error(Exception):
    """Base error type for this module's exception hierarchy."""
class EnvironmentAuthenticationError(Error):
    """Raised when establishing an environment fails due to bad credentials."""
class Environment(object):
"""Sets up the execution environment to use remote_api for RPCs.
As with any use of remote_api, this has three important caveats:
1. By going through the Remote API rather than your application's handlers,
you are bypassing any business logic in those handlers. It is easy in
this way to accidentally corrupt the system receiving your RPCs.
2. There is no guarantee that the code running on the system receiving your
RPCs is the same version as the code running locally. It is easy to have
version skew that corrupts the destination system.
3. Execution is markedly slower than running in production.
"""
def __init__(
self, application_id, server, path='/_ah/remote_api'):
"""Constructs a new Environment.
Args:
application_id: string. The application id of the environment
(myapp).
server: string. The full name of the server to connect to
(myurl.appspot.com).
path: string. The URL of your app's remote api entry point.
"""
self._application_id = application_id
self._path = path
self._server = server
@staticmethod
def _dev_appserver_auth_func():
"""Auth function to run for dev_appserver (bogus password)."""
return raw_input('Email: '), _BOGUS_PASSWORD
@staticmethod
def _test_auth_func():
"""Auth function to run in tests (bogus username and password)."""
return os.environ.get('USER_EMAIL', _TEST_EMAIL), _BOGUS_PASSWORD
def _get_auth_func(self):
"""Returns authentication function for the remote API."""
if os.environ.get('SERVER_SOFTWARE', '').startswith(
TEST_SERVER_SOFTWARE):
return self._test_auth_func
elif self._is_localhost():
return self._dev_appserver_auth_func
else:
return remote_api_shell.auth_func
def _get_internal_application_id(self):
"""Returns string containing App Engine's internal id representation."""
prefix = _REMOTE_APPLICATION_ID_PREFIX
if self._is_localhost():
prefix = _LOCAL_APPLICATION_ID_PREFIX
elif not self._is_appspot():
prefix = '%s%s:' % (prefix, _GOOGLE_APPLICATION_INFIX)
return prefix + self._application_id
def _get_secure(self):
"""Returns boolean indicating whether or not to use https."""
return not self._is_localhost()
def _is_appspot(self):
"""Returns True iff server is appspot.com."""
return self._server.endswith(_APPSPOT_SERVER_SUFFIX)
def _is_localhost(self):
"""Returns True if environment is dev_appserver and False otherwise."""
return self._server.startswith(_LOCAL_SERVER_PREFIX)
    def establish(self):
        """Establishes the environment for RPC execution.

        Configures the App Engine remote API stub so that subsequent service
        calls (datastore, etc.) are proxied to the configured server, then
        forces authentication to happen immediately.

        Raises:
            EnvironmentAuthenticationError: if authentication against the
                remote endpoint fails.
        """
        try:
            remote_api_stub.ConfigureRemoteApi(
                self._get_internal_application_id(), self._path,
                self._get_auth_func(), servername=self._server,
                save_cookies=True, secure=self._get_secure(),
                rpc_server_factory=appengine_rpc.HttpRpcServer)
            remote_api_stub.MaybeInvokeAuthentication()
        except AttributeError:
            # NOTE(review): auth failures appear to surface from the stub as
            # AttributeError and are mapped to a domain-specific error here —
            # confirm against remote_api_stub's actual failure modes.
            raise EnvironmentAuthenticationError
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for writing extract-transform-load scripts."""
__author__ = [
'johncox@google.com',
]
import argparse
import datetime
import time
from controllers import sites
from models import courses
def get_context(course_url_prefix):
    """Gets requested application context from the given course URL prefix.

    Args:
        course_url_prefix: string. Value of etl.py's course_url_prefix flag.

    Returns:
        sites.ApplicationContext for the matching course, or None if no
        course matches the prefix.
    """
    wanted = 'course:%s:' % course_url_prefix
    for context in sites.get_all_courses():
        if context.raw.startswith(wanted):
            return context
    return None
def get_course(app_context):
    """Gets a courses.Course from the given sites.ApplicationContext.

    Does not ensure the course exists on the backend; validation should be done
    by the caller when getting the app_context object.

    Args:
        app_context: sites.ApplicationContext. The context we're getting the
            course for.

    Returns:
        courses.Course.
    """

    # Minimal stand-in exposing only an app_context attribute, which is what
    # courses.Course is handed here.
    class _Shim(object):

        def __init__(self, app_context):
            self.app_context = app_context

    return courses.Course(_Shim(app_context))
class Job(object):
    """Abstract base class for user-defined custom ETL jobs.

    Custom jobs are executed by etl.py. They may run arbitrary local
    computations, while calls to App Engine services (db.get() or db.put(),
    for example) execute against a remote server, so you can build data
    pipelines that are impossible inside the App Engine runtime itself.

    etl.py authenticates against the remote server (prompting for credentials
    if needed), points local RPCs at the requested remote endpoint, then
    imports your job. A job is a Python class derived from this class; you
    must configure sys.path before invoking etl.py so all required libraries
    import cleanly (see etl.py), and override main() with your computation.

    Invocation:

    $ python etl.py run path.to.my.Job /cs101 myapp server.appspot.com \
        --job_args='more_args --delegated_to my.Job'

    Before main() runs, arguments are parsed. The full set of parsed etl.py
    arguments is available as self.etl_args; the quote-enclosed string passed
    via --job_args is delegated to the job and parsed with self.parser, whose
    flags you register by overriding self._configure_parser(). The parsed
    delegated arguments end up in self.args.

    See tools/etl/examples.py for some nontrivial sample job implementations.
    """

    def __init__(self, parsed_etl_args):
        """Constructs a new job.

        Args:
            parsed_etl_args: argparse.Namespace. Parsed arguments passed to
                etl.py.
        """
        self._parsed_etl_args = parsed_etl_args
        self._parsed_args = None
        self._parser = None

    def _configure_parser(self):
        """Hook for subclasses to register flags for --job_args parsing.

        For example:

            self.parser.add_argument(
                'my_arg', help='A required argument', type=str)
        """
        pass

    def main(self):
        """Computations made by this job; must be overridden in subclass."""
        pass

    @property
    def args(self):
        """Returns etl.py's parsed --job_args, or None if run() not invoked."""
        return self._parsed_args

    @property
    def etl_args(self):
        """Returns parsed etl.py arguments."""
        return self._parsed_etl_args

    @property
    def parser(self):
        """Lazily creates and returns the argparse parser for --job_args."""
        if self._parser is None:
            prog = '%s.%s' % (
                self.__class__.__module__, self.__class__.__name__)
            self._parser = argparse.ArgumentParser(
                prog=prog,
                usage=(
                    'etl.py run %(prog)s [etl.py options] [--job_args] '
                    '[%(prog)s options]'))
        return self._parser

    def _parse_args(self):
        # Let the subclass register its flags first, then parse the string
        # that etl.py delegated via --job_args.
        self._configure_parser()
        self._parsed_args = self.parser.parse_args(
            self._parsed_etl_args.job_args)

    def run(self):
        """Executes the job; called for you by etl.py."""
        self._parse_args()
        self.main()
class _ProgressReporter(object):
    """Provide intermittent reports on progress of a long-running operation.

    Progress is counted in units of "chunks" of chunk_size items; a log line
    is emitted each time a chunk's worth of items has been counted, with a
    rate estimate averaged over the most recent num_history chunks.
    """

    def __init__(self, logger, verb, noun, chunk_size, total, num_history=10):
        self._logger = logger
        # verb/noun only flavor the log message, e.g. "Uploading ... rows".
        self._verb = verb
        self._noun = noun
        self._chunk_size = chunk_size
        self._total = total
        self._num_history = num_history
        # Seconds taken by each of the most recent chunks (sliding window).
        self._rate_history = []
        self._start_time = self._chunk_start_time = time.time()
        self._total_count = 0
        # Items counted toward the current (incomplete) chunk.
        self._chunk_count = 0

    def count(self, quantity=1):
        """Counts quantity items, logging once per completed chunk."""
        self._total_count += quantity
        self._chunk_count += quantity
        # A single large quantity may complete several chunks at once; emit
        # one report per completed chunk.
        while self._chunk_count >= self._chunk_size:
            now = time.time()
            self._chunk_count -= self._chunk_size
            self._rate_history.append(now - self._chunk_start_time)
            self._chunk_start_time = now
            while len(self._rate_history) > self._num_history:
                del self._rate_history[0]
            self.report()

    def get_count(self):
        """Returns the total number of items counted so far."""
        return self._total_count

    def report(self):
        """Logs one progress line with rate and time-remaining estimates."""
        now = time.time()
        total_time = datetime.timedelta(
            days=0, seconds=int(now - self._start_time))
        # No timing history yet (or zero elapsed time): avoid dividing by
        # zero and report zeros instead.
        if not sum(self._rate_history):
            rate = 0
            time_left = 0
            expected_total = 0
        else:
            rate = ((len(self._rate_history) * self._chunk_size) /
                    sum(self._rate_history))
            time_left = datetime.timedelta(
                days=0,
                seconds=int((self._total - self._total_count) / rate))
            expected_total = datetime.timedelta(
                days=0, seconds=int(self._total / rate))
        self._logger.info(
            '%(verb)s %(total_count)9d of %(total)d %(noun)s '
            'in %(total_time)s. Recent rate is %(rate)d/sec; '
            '%(time_left)s seconds to go '
            '(%(expected_total)s total) at this rate.' %
            {
                'verb': self._verb,
                'total_count': self._total_count,
                'total': self._total,
                'noun': self._noun,
                'total_time': total_time,
                'rate': rate,
                'time_left': time_left,
                'expected_total': expected_total
            })
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of custom MapReduce jobs."""
__author__ = [
'juliaoh@google.com (Julia Oh)',
]
import math
import sys

import mapreduce
from models import transforms
# Int. Upper bound for valid video playhead positions, in seconds; the longest
# GeoMOOC video is ~8 minutes, so larger positions are treated as bad data.
_POS_LIMIT_SECONDS = 60 * 8

# Str. Constant str key to accumulate all values under one key in reduce.
_SUM = 'sum'
class CsvStudentEventAccumulationPipeline(mapreduce.CsvGenerator):
    """Example MapReduce pipeline class for histogram of event instance counts.

    This MR pipeline:
    1. Takes in EventEntity as input source.
    2. Counts the total number of event instances per user.
    3. Aggregates event counts across all users.
    4. Creates a histogram from aggregated event counts.
    5. Flattens the histogram.
    6. Formats the histogram into CSV.
    """

    # List of sources of Youtube video data in EventEntity.
    _VIDEO_SOURCES = [
        'tag-youtube-milestone',
        'tag-youtube-event'
    ]

    def run(self, job):
        """Runs the mapreduce pipeline job.

        Args:
            job: the mrs job object used to schedule the map/reduce phases.

        Returns:
            0 on success; 1 if the input source or output directory is bad.
        """
        # Validate input source and output directory. mrs. MapReduce framework
        # expects the value 1 if the job fails. If the input source is empty or
        # the output directory cannot be found, the job is a failure and must
        # return 1.
        source = self.input_data(job)
        if not source:
            return 1
        outdir = self.output_dir()
        if not outdir:
            return 1
        # Set the configuration for pipeline. Each intermediate dataset is
        # close()d as soon as the next phase has been configured with it.
        # First MapReduce phase: filter out event data and aggregate by user
        # key.
        user_to_event_instance_count = job.map_data(
            source, self.map_userid_to_1)
        source.close()
        total_event_counts_per_user = job.reduce_data(
            user_to_event_instance_count, self.sum_event_instances_per_user)
        user_to_event_instance_count.close()
        # Second MapReduce phase: Create a histogram with aggregated data.
        aggregated_event_counts = job.map_data(
            total_event_counts_per_user,
            self.map_all_event_counts_to_single_key
        )
        total_event_counts_per_user.close()
        histogram = job.reduce_data(
            aggregated_event_counts,
            self.create_histogram_from_aggregated_event_counts
        )
        aggregated_event_counts.close()
        # Third MapReduce phase: Flatten the data and format it to CSV. This
        # phase calls the map() and reduce() functions defined in CsvGenerator
        # class.
        flattened_histogram = job.map_data(histogram, self.map)
        histogram.close()
        histogram_csv = job.reduce_data(
            flattened_histogram, self.reduce, format=mapreduce.CsvWriter,
            outdir=outdir)
        flattened_histogram.close()
        histogram_csv.close()
        # Run the job with above configurations. job.wait() does not return any
        # value until the entire job is done. Partial progress of each phases
        # will be printed while the job is running.
        ready = []
        while not ready:
            ready = job.wait(histogram_csv, timeout=2.0)
            first_map_percent = 100 * job.progress(user_to_event_instance_count)
            first_reduce_percent = 100 * job.progress(
                total_event_counts_per_user)
            second_map_percent = 100 * job.progress(aggregated_event_counts)
            second_reduce_percent = 100 * job.progress(histogram)
            third_map_percent = 100 * job.progress(flattened_histogram)
            third_reduce_percent = 100 * job.progress(histogram_csv)
            string_map = {
                'map1_name': self.map_userid_to_1.__name__,
                'map1_progress': first_map_percent,
                'reduce1_name': self.sum_event_instances_per_user.__name__,
                'reduce1_progress': first_reduce_percent,
                'map2_name': self.map_all_event_counts_to_single_key.__name__,
                'map2_progress': second_map_percent,
                'reduce2_name': (
                    self.create_histogram_from_aggregated_event_counts.__name__
                ),
                'reduce2_progress': second_reduce_percent,
                'map3_name': self.map.__name__,
                'map3_progress': third_map_percent,
                'reduce3_name': self.reduce.__name__,
                'reduce3_progress': third_reduce_percent
            }
            print (
                '%(map1_name)s: %(map1_progress).1f complete. \n'
                '%(reduce1_name)s: %(reduce1_progress).1f complete. \n'
                '%(map2_name)s: %(map2_progress).1f complete. \n'
                '%(reduce2_name)s: %(reduce2_progress).1f complete. \n'
                'csv_%(map3_name)s: %(map3_progress).1f complete. \n'
                'csv_%(reduce3_name)s: %(reduce3_progress).1f complete. \n' %
                string_map
            )
            sys.stdout.flush()
        return 0

    def map_userid_to_1(self, unused_key, value):
        """Maps user_id to value of 1.

        Args:
            unused_key: int. Line number of EventEntity JSON object in file.
            value: str. Instance of EventEntity extracted from file.

        Yields:
            A tuple of (user_id, 1).
            Value of 1 represents one instance of event for the user.
        """
        json = self.json_parse(value)
        if json and json['user_id']:
            if json['source'] in self._VIDEO_SOURCES:
                video_data = transforms.loads(json['data'])
                if video_data['position'] > _POS_LIMIT_SECONDS:
                    # Filter bad data from YouTube API.
                    return
            yield json['user_id'], 1

    def sum_event_instances_per_user(self, unused_key, values):
        """Sums up number of entity instances per student.

        Args:
            unused_key: str. Represents user_id.
            values: An iterator over entity instance counts per student.

        Yields:
            A dict with key value pair as:
                key: constant string literal 'sum'
                value: int. Total number of entity instances.
        """
        yield {_SUM: sum(values)}

    def map_all_event_counts_to_single_key(self, unused_key, value):
        """Re-keys every per-user total under the single constant key _SUM."""
        yield _SUM, value[_SUM]

    def create_histogram_from_aggregated_event_counts(self, unused_key, values):
        """Creates a histogram from event entity instance counts.

        Args:
            unused_key: str. Constant string 'key' emitted by mapper2.
            values: An iterator over list of integer event instance counts.

        Yields:
            A serialized JSON representation of python dictionary. The keys of
            the python dict are indices of the histogram interval, and the
            corresponding values are number of events that are in that interval.
            An example output looks like: {0: 10, 1: 15, 2: 100}
        """
        # Histogram bucket size is 50 events.
        histogram = mapreduce.Histogram(50)
        for value in values:
            histogram.add(value)
        yield transforms.dumps(
            {index: value for index, value in enumerate(
                histogram.to_noise_filtered_list())})
class CsvStudentEventsHistogram(mapreduce.MapReduceJob):
    """MapReduce Job that generates a histogram for event counts per student.

    Usage:
    python etl.py run \
        tools.etl.mapreduce_examples.CsvStudentEventsHistogram \
        /coursename appid server.appspot.com \
        --job_args='path_to_EventEntity.json path_to_output_directory'
    """

    # Pipeline class that does the actual map/reduce work for this job.
    MAPREDUCE_CLASS = CsvStudentEventAccumulationPipeline
class StudentDurationAccumulationPipeline(mapreduce.MapReduceBase):
    """Sums up amount of time spent on course per student.

    This pipeline:
    1. Takes an EventEntity file as input.
    2. Sum up all valid page-visit duration values per user.
    3. Aggregate summed up duration values across all users.
    4. Create a histogram with these values.
    """

    # Str. Source of event in EventEntity generated during a page visit.
    _VISIT_PAGE = 'visit-page'

    # Int. A hard limit for duration value on visit-page events to filter
    # misleading data. If a user keeps the browser open and goes idle, duration
    # values can get very large.
    _DURATION_MINUTES_LIMIT = 30

    def run(self, job):
        """Runs the mapreduce pipeline job.

        Args:
            job: the mrs job object used to schedule the map/reduce phases.

        Returns:
            0 on success; 1 if the input source or output directory is bad.
        """
        # Validate input source and output directory.
        source = self.input_data(job)
        if not source:
            return 1
        outdir = self.output_dir()
        if not outdir:
            return 1
        # Set the configuration for pipeline. Each intermediate dataset is
        # close()d as soon as the next phase has been configured with it.
        # First MapReduce phase: filter out page-visit duration values and
        # accumulate under user key.
        user_to_duration = job.map_data(source, self.map_user_to_duration)
        source.close()
        user_to_total_duration = job.reduce_data(
            user_to_duration, self.sum_total_duration_per_user)
        user_to_duration.close()
        # Second MapReduce phase: Create a histogram with aggregated duration
        # values from all users.
        aggregated_duration_values = job.map_data(
            user_to_total_duration,
            self.map_all_user_duration_total_to_single_key
        )
        user_to_total_duration.close()
        histogram = job.reduce_data(
            aggregated_duration_values,
            self.create_histogram_from_duration_distribution,
            outdir=outdir,
            format=mapreduce.JsonWriter
        )
        aggregated_duration_values.close()
        histogram.close()
        # Run the job with above configurations.
        ready = []
        while not ready:
            ready = job.wait(histogram, timeout=2.0)
            first_map_percent = 100 * job.progress(user_to_duration)
            first_reduce_percent = 100 * job.progress(user_to_total_duration)
            second_map_percent = 100 * job.progress(aggregated_duration_values)
            second_reduce_percent = 100 * job.progress(histogram)
            string_map = {
                'map1_name': self.map_user_to_duration.__name__,
                'map1_progress': first_map_percent,
                'reduce1_name': self.sum_total_duration_per_user.__name__,
                'reduce1_progress': first_reduce_percent,
                'map2_name': (
                    self.map_all_user_duration_total_to_single_key.__name__
                ),
                'map2_progress': second_map_percent,
                'reduce2_name': (
                    self.create_histogram_from_duration_distribution.__name__
                ),
                'reduce2_progress': second_reduce_percent
            }
            print (
                '%(map1_name)s: %(map1_progress).1f complete. \n'
                '%(reduce1_name)s: %(reduce1_progress).1f complete. \n'
                '%(map2_name)s: %(map2_progress).1f complete. \n'
                '%(reduce2_name)s: %(reduce2_progress).1f complete. \n' %
                string_map
            )
            sys.stdout.flush()
        return 0

    def map_user_to_duration(self, unused_key, value):
        """Maps user_id to duration value in 'visit-page' events.

        Args:
            unused_key: int. Line number of EventEntity JSON object in file.
            value: str. Instance of EventEntity extracted from file.

        Yields:
            A tuple of (user_id, valid duration value in minutes).
            Valid duration value is defined as positive integer duration values
            that are less than _DURATION_MINUTES_LIMIT. Duration values are
            validated to filter noisy data.
        """
        json = self.json_parse(value)
        if json and json['user_id'] and json['source'] == self._VISIT_PAGE:
            event_data = transforms.loads(json['data'])
            # Convert duration in milliseconds to minutes.
            duration_minutes = event_data['duration'] // (1000 * 60)
            if (duration_minutes <= self._DURATION_MINUTES_LIMIT and
                duration_minutes > 0):
                yield json.pop('user_id'), duration_minutes

    def sum_total_duration_per_user(self, unused_key, values):
        """Sums up the page-visit duration values for one student.

        Args:
            unused_key: str. Represents user_id.
            values: An iterator over per-event duration values (in minutes)
                for one student.

        Yields:
            A dict with key value pair as:
                key: constant string literal 'sum'
                value: int. Total duration in minutes for this student.
        """
        yield {_SUM: sum(values)}

    def map_all_user_duration_total_to_single_key(self, unused_key, value):
        """Re-keys each per-user duration total under the constant key _SUM."""
        yield _SUM, value[_SUM]

    def create_histogram_from_duration_distribution(self, unused_key, values):
        """Creates a histogram from summed up duration values.

        Args:
            unused_key: str. Constant string 'sum' emitted by
                map_all_user_duration_total_to_single_key().
            values: An iterator over list of summed up duration values.

        Yields:
            A python dictionary (serialized later by JsonWriter). The keys of
            the python dict are indices of the histogram interval, and the
            corresponding values are number of summed up duration values that
            are in that interval index.
            An example output looks like:
                duration_values = [50, 65, 100, 130]
                histogram bucket_size = 60
                output: {0: 1, 1: 2, 2: 1}
        """
        # Histogram bucket size is one hour.
        histogram = mapreduce.Histogram(60)
        for value in values:
            histogram.add(value)
        yield {index: value for index, value in enumerate(
            histogram.to_noise_filtered_list())}
class StudentPageDurationHistogram(mapreduce.MapReduceJob):
    """MapReduce Job that generates a histogram for time spent on course pages.

    Usage:
    python etl.py run \
        tools.etl.mapreduce_examples.StudentPageDurationHistogram \
        /coursename appid server.appspot.com \
        --job_args='path_to_EventEntity.json path_to_output_directory'
    """

    # Pipeline class that does the actual map/reduce work for this job.
    MAPREDUCE_CLASS = StudentDurationAccumulationPipeline
class WordCount(mapreduce.MapReduceBase):
    """Counts word frequency in input.

    Output is plain text of the format:

    word1: count1
    word2: count2
    ...
    wordn: countn
    """

    # JSON is our usual interchange format, so mapreduce.JsonWriter is the
    # default output writer; this canonical example emits plain text instead.
    WRITER_CLASS = mapreduce.TextWriter

    def map(self, unused_key, value):
        """Emits (token, 1) for each whitespace-delimited token in the line.

        Tokens are lowercased so that 'To' and 'to' are treated as the same
        word; one pair is yielded per occurrence. Both map and reduce yield
        rather than return.
        """
        for token in value.split():
            yield token.lower(), 1

    def reduce(self, key, values):
        """Emits one plain-text 'token: total' line per distinct token.

        values is a sequence of 1s, one per time map saw key; its sum is the
        occurrence count the WRITER_CLASS writes out.
        """
        yield '%s: %s' % (key, sum(values))
class WordCountJob(mapreduce.MapReduceJob):
    """MapReduce Job that illustrates simple word count of input.

    Usage:
    python etl.py run \
        tools.etl.mapreduce_examples.WordCount \
        /coursename appid server.appspot.com \
        --job_args='path/to/input.file path/to/output/directory'
    """

    # Pipeline class that does the actual map/reduce work for this job.
    MAPREDUCE_CLASS = WordCount
class YoutubeHistogramGenerator(mapreduce.MapReduceBase):
    """Generates time histogram of user video engagement.

    Input file: EventEntity JSON file.
    Each event has a 'source' that defines a place in a code where the event
    was recorded. Each event has a 'user_id' to represent an actor who
    triggered the event. The event 'data' is a JSON object and its format and
    content depends on the type of the event. For YouTube video events, 'data'
    is a dictionary with 'video_id', 'instance_id', 'event_id', 'position',
    'data', 'location'.
    """

    # String. Event source value for YouTube videos in EventEntity.json.
    _YOUTUBE_MILESTONE_SOURCE = 'tag-youtube-milestone'

    def map(self, unused_key, value):
        """Filters out YouTube video data from EventEntity JSON file.

        Args:
            unused_key: int. line number of each EventEntity in file.
            value: str. instance of EventEntity extracted from file.

        Yields:
            A tuple of (video_identifier, time_position) to be passed into
            reduce function.
            Video_identifier is a tuple of YouTube video_id and instance_id,
            and time_position is the video playhead count.
        """
        json = self.json_parse(value)
        if json and json['source'] == self._YOUTUBE_MILESTONE_SOURCE:
            data = transforms.loads(json['data'])
            video_identifier = (data['video_id'], data['instance_id'])
            playhead_position = data['position']
            # Youtube API may return NaN if the position couldn't be computed.
            # Bug fix: the previous check 'playhead_position != float("nan")'
            # was always True, because NaN compares unequal to everything,
            # including another NaN; math.isnan() is the correct test.
            if (playhead_position <= _POS_LIMIT_SECONDS and
                not math.isnan(playhead_position)):
                yield video_identifier, playhead_position

    def reduce(self, key, values):
        """Creates a histogram from time_position values.

        The value of _BUCKET_SIZE comes from the constant
        GCB_VIDEO_TRACKING_CHUNK_SEC in youtube_video.js. This value indicates
        the interval of the milestone events. If GCB_VIDEO_TRACKING_CHUNK_SEC
        changes, _BUCKET_SIZE will have to be updated accordingly.

        Args:
            key: tuple. video_id, video instance id.
            values: An iterator over integer video playhead positions.

        Yields:
            A dictionary with video_id, instance_id, and histogram.
            The time histogram is a list in which each index represents
            sequential milestone events and the corresponding item at each
            index represents the number of users watching the video.
            An example output looks like:
            {'video_id': 123456, 'instance_id': 0,
             'histogram': [10, 8, 7, 5, 2, 1]}
        """
        # Bucket size is 30 seconds, the value of GCB_VIDEO_TRACKING_CHUNK_SEC
        # in youtube_video.js.
        histogram = mapreduce.Histogram(30)
        for value in values:
            histogram.add(value)
        yield {
            'video_id': key[0],
            'instance_id': key[1],
            'histogram': histogram.to_list()
        }
class YoutubeHistogram(mapreduce.MapReduceJob):
    """MapReduce job that generates a histogram for user video engagement.

    Usage: run the following command from the app root folder.
    python tools/etl/etl.py run tools.etl.mapreduce_examples.YoutubeHistogram \
        /coursename appid server.appspot.com \
        --job_args='path_to_EventEntity.json path_to_output_directory'
    """

    # Pipeline class that does the actual map/reduce work for this job.
    MAPREDUCE_CLASS = YoutubeHistogramGenerator
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MapReduce extensions for ETL."""
__author__ = [
'johncox@google.com (John Cox)',
'juliaoh@google.com (Julia Oh)',
]
import csv
import os
import sys
from xml.etree import ElementTree
import mrs
from models import transforms
from tools.etl import etl_lib
class MapReduceJob(etl_lib.Job):
    """Parent classes for custom jobs that run a mapreduce.

    Usage:
    python etl.py run path.to.my.job / appid server.appspot.com \
        --disable_remote \
        --job_args='path_to_input_file path_to_output_directory'
    """

    # Subclass of mrs.MapReduce; override in child.
    MAPREDUCE_CLASS = None

    def _configure_parser(self):
        """Shim that works with the arg parser expected by mrs.Mapreduce."""
        self.parser.add_argument(
            'file', type=str, help='Absolute path of the input file')
        self.parser.add_argument(
            'output', type=str, help='Absolute path of the output directory')

    def main(self):
        """Validates the path arguments, then hands control to mrs."""
        # Bail out with a readable message when either path is bad; passing a
        # string to sys.exit() prints it and exits nonzero.
        if not os.path.exists(self.args.file):
            sys.exit('Input file %s not found' % self.args.file)
        if not os.path.exists(self.args.output):
            sys.exit('Output directory %s not found' % self.args.output)
        mrs.main(self.MAPREDUCE_CLASS, args=self.etl_args.job_args)
class JsonWriter(mrs.fileformats.Writer):
    """Outputs one JSON literal per line.

    Example JSON output may look like:

    {'foo': 123, 'bar': 456, 'quz': 789}
    {'foo': 321, 'bar': 654, 'quz': 987}
    ...
    {'foo': 456, 'bar': 534, 'quz': 154}
    """

    ext = 'json'

    def __init__(self, fileobj, *args, **kwds):
        super(JsonWriter, self).__init__(fileobj, *args, **kwds)

    def _write_json(self, write_fn, python_object):
        """Writes serialized JSON representation of python_object to file.

        Lists are handled by recursing over their elements, one line per
        contained dict.

        Args:
            write_fn: Python file object write() method.
            python_object: object. Contents to write. Must be JSON-serializable.

        Raises:
            TypeError: if python_object is not a dict or a list.
        """
        if isinstance(python_object, list):
            for element in python_object:
                self._write_json(write_fn, element)
        elif isinstance(python_object, dict):
            write_fn(unicode(
                transforms.dumps(python_object) + '\n').encode('utf-8'))
        else:
            raise TypeError('Value must be a dict or a list of dicts.')

    def writepair(self, kvpair, **unused_kwds):
        # Only the value side of the pair is serialized; the key is dropped.
        unused_key, payload = kvpair
        self._write_json(self.fileobj.write, payload)
class TextWriter(mrs.fileformats.TextWriter):
    """A simplified plain text writer."""

    # Use the expected extension rather than mrs' mtxt default.
    ext = 'txt'

    def writepair(self, pair, **unused_kwargs):
        # Emit the value alone; the stock mrs writer would prefix the key.
        unused_key, value = pair
        self.fileobj.write(unicode(value).encode('utf-8') + os.linesep)
class MapReduceBase(mrs.MapReduce):
    """Common functionalities of MR jobs combined into one class."""

    # Subclass of mrs.fileformats.Writer. The writer used to format output.
    WRITER_CLASS = JsonWriter

    def json_parse(self, value):
        """Parses one line of input into a Python object.

        Args:
            value: str. A line expected to hold a JSON literal, possibly with
                a trailing comma (as in a JSON array dumped one item per
                line).

        Returns:
            The parsed object, or None if the line is not parseable.
        """
        stripped = value.strip()
        # Bug fix: guard against blank lines. Previously this indexed
        # value.strip()[-1] unconditionally, so an empty line raised an
        # uncaught IndexError (the except below only covers the loads call).
        if stripped and stripped[-1] == ',':
            value = stripped[:-1]
        try:
            return transforms.loads(value)
        # Skip unparseable rows like the first and last
        # pylint: disable=bare-except
        except:
            return None

    def make_reduce_data(self, job, interm_data):
        """Changes the output format to JSON (or WRITER_CLASS's format)."""
        outdir = self.output_dir()
        output_data = job.reduce_data(
            interm_data, self.reduce, outdir=outdir, format=self.WRITER_CLASS)
        return output_data
class Histogram(object):
    """Histogram that bins values into _bucket_size sized intervals."""

    # Int. Number of consecutive zeros in list of integer values to determine
    # the cutoff point.
    _NUM_ZEROS = 3

    def __init__(self, bucket_size):
        self._bucket_size = bucket_size
        # Sparse map of 0-indexed bin number -> count of values in that bin.
        self._values = {}

    def add(self, value):
        """Counts value into its bucket."""
        self._increment_bin(self._get_bin_number(value))

    def _get_bin_number(self, value):
        """Returns appropriate bin number for given value."""
        if value < 0:
            raise ValueError('Cannot calculate index for negative value')
        # Values 1.._bucket_size land in bin 0, the next bucket in bin 1, etc.
        # Value 0 is clamped into bin 0 as well.
        bin_number = (value - 1) // self._bucket_size
        return bin_number if bin_number > 0 else 0

    def _increment_bin(self, n):
        count = self._values.get(n, 0)
        self._values[n] = count + 1

    def to_list(self):
        """Returns bin counts as a dense list ordered by bin number."""
        if not self._values:
            return []
        last_bin = max(self._values)
        return [self._values.get(i, 0) for i in range(last_bin + 1)]

    def to_noise_filtered_list(self):
        """Converts self._values to a list with junk data removed.

        Returns:
            self.to_list(), truncated at the first run of _NUM_ZEROS
            consecutive zero bins (the run itself and everything after it is
            dropped); the full list if no such run exists.

        "Junk data" refers to noise in EventEntity data caused by API
        misbehaviors and certain user behavior. Two known issues are:
        1. Youtube video events keep being emitted after a video has stopped
           playing, leaving a trail of meaningless values in the histogram.
        2. 'visit-page' duration values are skewed when a user goes idle with
           the browser open, producing outsized durations.

        Example:
            self.to_list() returns [1, 2, 3, 4, 5, 0, 0, 0, 0, 1]
            _NUM_ZEROS = 3
            output = [1, 2, 3, 4, 5]
        """
        values = self.to_list()
        run_length = 0
        run_start = 0
        for index, value in enumerate(values):
            if value:
                run_length = 0
                run_start = 0
            else:
                if not run_length:
                    run_start = index
                run_length += 1
                if run_length == self._NUM_ZEROS:
                    return values[:run_start]
        return values
class XmlWriter(mrs.fileformats.Writer):
    """Writes file in XML format.

    The writer does not use the key from kvpair and expects the value to be a
    list of string representation of XML elements.

    Example:
    kvpair: some_key, ['<row><name>Jane</name></row>',
                       '<row><name>John</name></row>']
    Output:
    <rows>
    <row><name>Jane</name></row>
    <row><name>John</name></row>
    </rows>
    """

    ext = 'xml'

    def __init__(self, fileobj, *args, **kwds):
        super(XmlWriter, self).__init__(fileobj, *args, **kwds)
        # Open the document-level element; finish() emits the closing tag.
        self.fileobj.write('<rows>')

    def writepair(self, kvpair, **unused_kwds):
        unused_key, elements = kvpair
        for element in elements:
            self.fileobj.write(element)
            self.fileobj.write('\n')

    def finish(self):
        self.fileobj.write('</rows>')
        self.fileobj.flush()
class XmlGenerator(MapReduceBase):
    """Generates a XML file from a JSON formatted input file."""

    WRITER_CLASS = XmlWriter

    def map(self, key, value):
        """Converts JSON object to xml.

        Args:
            key: int. line number of the value in Entity file.
            value: str. A line of JSON literal extracted from Entity file.

        Yields:
            A tuple with the string 'key' and a tuple containing line number
            and string representation of the XML element. Everything is keyed
            under one constant so reduce() sees all rows together.
        """
        json = self.json_parse(value)
        if not json:
            return
        root = ElementTree.Element('row')
        transforms.convert_dict_to_xml(root, json)
        yield 'key', (key, ElementTree.tostring(root, encoding='utf-8'))

    def reduce(self, unused_key, values):
        """Sorts the values by line number to keep the order of the document.

        Args:
            unused_key: str. The arbitrary string 'key' set to accumulate all
                values under one key.
            values: list of tuples. Each tuple contains line number and JSON
                literal converted to XML string.

        Yields:
            A list of XML strings sorted by the line number.
        """
        ordered = sorted(values, key=lambda pair: pair[0])
        yield [element for unused_line, element in ordered]
class JsonToXml(MapReduceJob):
    """MapReduce Job that converts JSON formatted Entity files to XML.

    Usage: run the following command from the app root folder.
    python tools/etl/etl.py run tools.etl.mapreduce.JsonToXml \
        /coursename appid server.appspot.com \
        --job_args='path_to_any_Entity_file path_to_output_directory'
    """

    # Pipeline class that performs the JSON -> XML conversion for this job.
    MAPREDUCE_CLASS = XmlGenerator
class CsvWriter(mrs.fileformats.Writer):
    """Writes file in CSV format.

    Rows missing a key get the empty string as that column's value.

    Example:
    kvpair: (some_key, (['bar', 'foo', 'quz'],
                        [{'foo': 1, 'bar': 2, 'quz': 3},
                         {'bar': 2, 'foo': 3}])
    Output:
    'bar', 'foo', 'quz'
    2, 1, 3
    2, 3, ''
    """

    ext = 'csv'

    def __init__(self, fileobj, *args, **kwds):
        super(CsvWriter, self).__init__(fileobj, *args, **kwds)

    def writepair(self, kvpair, **unused_kwds):
        """Writes list of JSON objects to CSV format.

        Args:
            kvpair: tuple of unused_key, and a tuple of master_list and
                json_list. Master_list is a list that contains all the
                fieldnames across json_list sorted in alphabetical order, and
                json_list is a list of JSON objects.
            **unused_kwds: keyword args that won't be used.
        """
        unused_key, payload = kvpair
        fieldnames, rows = payload
        # restval='' supplies the default for rows missing a column.
        writer = csv.DictWriter(
            self.fileobj, fieldnames=fieldnames, restval='')
        writer.writeheader()
        writer.writerows(rows)
class CsvGenerator(MapReduceBase):
    """Builds a CSV file out of a JSON-formatted input file."""
    WRITER_CLASS = CsvWriter
    @classmethod
    def _flatten_json(cls, _dict, prefix=''):
        """Flattens nested dicts/JSON in place; utf-8 encodes leaf values."""
        # Iterate over a snapshot of the keys: the dict is mutated below.
        for key in list(_dict.keys()):
            value = _dict.pop(key)
            nested_dict = None
            if type(value) is dict:
                nested_dict = value
            else:
                # Values may themselves be serialized JSON; try to parse.
                try:
                    parsed = transforms.loads(value, strict=False)
                    if parsed and type(parsed) is dict:
                        nested_dict = parsed
                except:  # pylint: disable=bare-except
                    pass
            if nested_dict:
                # Hoist the nested keys up, namespaced by the parent key.
                _dict.update(cls._flatten_json(
                    nested_dict, prefix=prefix + key + '_'))
            else:
                _dict[prefix + key] = unicode(value).encode('utf-8')
        return _dict
    def map(self, unused_key, value):
        """Parses one line of JSON and yields its flattened form.

        Example:
            json['data']['foo'] = 'bar' -> json['data_foo'] = 'bar', with
            json['data'] removed.

        Args:
            unused_key: int. Line number of the value in the Entity file.
            value: str. One serialized entity read from the Entity file.

        Yields:
            A ('key', flattened_dict) tuple. The constant string 'key'
            funnels every row to a single reducer, which must see all rows
            at once to compute the complete set of CSV header fieldnames
            handed to CsvWriter.writepair().
        """
        parsed = self.json_parse(value)
        if parsed:
            yield 'key', CsvGenerator._flatten_json(parsed)
    def reduce(self, unused_key, values):
        """Collects every fieldname present across all rows.

        Args:
            unused_key: str. The constant string 'key' emitted by map().
            values: a generator over the flattened JSON objects.

        Yields:
            A (master_list, rows) tuple, where master_list names every key
            present in any row and serves as the CSV header.
        """
        rows = list(values)
        fieldnames = []
        for row in rows:
            for fieldname in row:
                if fieldname not in fieldnames:
                    fieldnames.append(fieldname)
        try:
            # Numeric keys should sort numerically rather than lexically.
            # pylint: disable=unnecessary-lambda
            fieldnames = sorted(fieldnames, key=lambda item: int(item))
        except ValueError:
            # At least one key is non-numeric; fall back to string sort.
            fieldnames = sorted(fieldnames)
        yield fieldnames, rows
class JsonToCsv(MapReduceJob):
    """MapReduce job converting a JSON-formatted Entity file into CSV.

    Usage (run from the app root folder):

    python tools/etl/etl.py run tools.etl.mapreduce.JsonToCsv \
        /coursename appid server.appspot.com \
        --job_args='path_to_an_Entity_file path_to_output_directory'
    """
    MAPREDUCE_CLASS = CsvGenerator
# Register each writer under its file extension so the mrs framework can
# select the output format matching a job's requested extension.
mrs.fileformats.writer_map['csv'] = CsvWriter
mrs.fileformats.writer_map['json'] = JsonWriter
mrs.fileformats.writer_map['txt'] = TextWriter
mrs.fileformats.writer_map['xml'] = XmlWriter
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extract-transform-load utility.
There are five features:
1. Download and upload of Course Builder 1.3+ data:
$ python etl.py download course /cs101 myapp server.appspot.com archive.zip
This will result in a file called archive.zip that contains the files that make
up the Course Builder 1.3+ course found at the URL /cs101 on the application
with id myapp running on the server named server.appspot.com. archive.zip will
contain assets and data files from the course along with a manifest.json
enumerating them. The format of archive.zip will change and should not be relied
upon.
For upload of course and related data
$ python etl.py upload course /cs101 myapp server.appspot.com \
--archive_path archive.zip
2. Download of datastore entities. This feature is experimental.
$ python etl.py download datastore /cs101 myapp server.appspot.com \
--archive_path archive.zip --datastore_types model1,model2
This will result in a file called archive.zip that contains a dump of all model1
and model2 instances found in the specified course, identified as above. The
archive will contain serialized data along with a manifest. The format of
archive.zip will change and should not be relied upon.
By default, all data types are downloaded. You can specifically select or
skip specific types using the --datastore_types and --exclude_types flags,
respectively.
3. Upload of datastore entities. This feature is experimental.
$ python etl.py upload datastore /cs101 myapp server.appspot.com \
--archive_path archive.zip
Uploads should ideally be (but are not required to be) done to courses that
are not available to students and which are not actively being edited by
admins. To upload to a course, it must first exist. You can create a blank
new course using the administrator UI. Note that keys and fields that use PII
are obscured during download if --privacy_secret is used, and not obscured if
not. Uploading of multiple downloads to the same course is supported, but
for encoded references to work, all uploads must have been created with
the same --privacy_secret (or all with no secret).
Other flags for uploading are recommended:
--resume: Use this flag to permit an upload to resume where it left off.
--force_overwrite: Unless this flag is specified, every entity to be
uploaded is checked to see whether an entity with this key already
exists in the datastore. This takes substantial additional time.
If you are sure that there will not be any overlap between existing data
and uploaded data, use of this flag is strongly recommended.
--batch_size=<NNN>: Set this to larger values to group uploaded entities
together for efficiency. Higher values help, but give diminishing
returns. Start at around 100.
--datastore_types: and/or --exclude_types By default, all types in the
specified .zip file are uploaded. You may select or ignore specific types
with these flags, respectively.
Data extracted from courses running an older version of CourseBuilder
may contain entities of types that no longer exist in the code base of a
more-recent CourseBuilder installation. Depending on the nature of the
change, this extra data may simply be no longer needed. Conversely, the
data may be crucial to the correct operation of an older, un-upgraded
installation, and will simply not work with a newer version of code.
Using a specific set of types can permit upload of the entities that are
still recognized. Operation of CourseBuilder with the partial data may
well be compromised for some or all functionality; it is safest to
upload the data to a blank course and experiment before uploading
incomplete data to a production instance.
For example, you may wish to upload to a local instance to test things out
before uploading to a production installation:
./scripts/etl.sh --force_overwrite --batch_size=100 --resume \
--exclude_types=RootUsageEntity,KeyValueEntity,DefinitionEntity,UsageEntity \
--archive_path my_archive_file.zip \
upload datastore /new_course mycourse localhost:8081
4. Deletion of all datastore entities in a single course. Delete of the course
itself not supported. To run:
$ python etl.py delete datastore /cs101 myapp server.appspot.com
Before delete commences, you will be told what entity kinds will be deleted and
you will be prompted for confirmation. Note that this process is irreversible,
and, if interrupted, may leave the course in an invalid state. Note also that it
races with writes against your datastore unless you first disable writes.
Finally, note that only the datastore entities of the kinds listed will be
deleted, and those will only be deleted from the namespace corresponding to the
target course. Custom entities you added to base Course Builder may or may not
be processed. Entities in the global namespace and those created by App Engine
will not be processed.
Deleting a course flushes caches. Because memcache does not support namespaced
flush all operations, all caches for all courses will be flushed.
5. Execution of custom jobs.
$ python etl.py run path.to.my.Job /cs101 myapp server.appspot.com \
--job_args='more_args --delegated_to my.Job'
This requires that you have written a custom class named Job found in the
directory path/to/my, relative to the Course Builder root. Job's main method
will be executed against the specified course, identified as above. See
etl_lib.Job for more information.
In order to run this script, you must add the following to the head of sys.path:
1. The absolute path of your Course Builder installation.
2. The absolute path of your App Engine SDK.
3. The absolute paths of third party libraries from the SDK used by Course
Builder:
fancy_urllib
jinja2
webapp2
webob
Their locations in the supported 1.9.17 App Engine SDK are
<sdk_path>/lib/fancy_urllib
<sdk_path>/lib/jinja2-2.6
<sdk_path>/lib/webapp2-2.5.2
<sdk_path>/lib/webob-1.2.3
where <sdk_path> is the absolute path of the 1.9.17 App Engine SDK.
4. If you are running a custom job, the absolute paths of all code required
by your custom job, unless covered above.
When running etl.py against a remote endpoint you will be prompted for a
username and password. If the remote endpoint is a development server, you may
enter any username and password. If the remote endpoint is in production, enter
your username and an application-specific password. See
http://support.google.com/accounts/bin/answer.py?hl=en&answer=185833 for help on
application-specific passwords.
Pass --help for additional usage information.
"""
__author__ = [
'johncox@google.com (John Cox)',
]
import argparse
import functools
import logging
import os
import random
import re
import shutil
import sys
import time
import traceback
import zipfile
import yaml
# Placeholders for modules we'll import after setting up sys.path. This allows
# us to avoid lint suppressions at every callsite.
appengine_config = None
common_utils = None
config = None
courses = None
crypto = None
db = None
entity_transforms = None
etl_lib = None
memcache = None
metadata = None
remote = None
sites = None
transforms = None
vfs = None
# String. Prefix for files stored in an archive.
_ARCHIVE_PATH_PREFIX = 'files'
# String. Prefix for models stored in an archive.
_ARCHIVE_PATH_PREFIX_MODELS = 'models'
# String. End of the path to course.json in an archive.
_COURSE_JSON_PATH_SUFFIX = 'data/course.json'
# String. End of the path to course.yaml in an archive.
_COURSE_YAML_PATH_SUFFIX = 'course.yaml'
# String. Message the user must type to confirm datastore deletion.
_DELETE_DATASTORE_CONFIRMATION_INPUT = 'YES, DELETE'
# List of types which are not to be downloaded. These are types which
# are either known to be transient, disposable state classes (e.g.,
# map/reduce's "_AE_..." classes), or legacy types no longer required.
_EXCLUDE_TYPES = set([
    # Map/reduce internal types:
    '_AE_MR_MapreduceState',
    '_AE_MR_ShardState',
    '_AE_Pipeline_Barrier',
    '_AE_Pipeline_Record',
    '_AE_Pipeline_Slot',
    '_AE_Pipeline_Status',
    # AppEngine internal background jobs queue
    '_DeferredTaskEntity',
    ])
# Function that takes one arg and returns it.
_IDENTITY_TRANSFORM = lambda x: x
# Regex. Format of __internal_names__ used by datastore kinds.
_INTERNAL_DATASTORE_KIND_REGEX = re.compile(r'^__.*__$')
# Names of fields in row which should be ignored when importing datastore.
_KEY_FIELDS = set(['key.id', 'key.name', 'key'])
# Path prefix strings from local disk that will be included in the archive.
_LOCAL_WHITELIST = frozenset([_COURSE_YAML_PATH_SUFFIX, 'assets', 'data'])
# Path prefix strings that are subdirectories of the whitelist that we actually
# want to exclude because they aren't userland code and will cause conflicts.
_LOCAL_WHITELIST_EXCLUDES = frozenset(['assets/lib'])
# logging.Logger. Module logger.
_LOG = logging.getLogger('coursebuilder.tools.etl')
logging.basicConfig()
# List of string. Valid values for --log_level.
_LOG_LEVEL_CHOICES = ['DEBUG', 'ERROR', 'INFO', 'WARNING']
# String. Name of the manifest file.
_MANIFEST_FILENAME = 'manifest.json'
# String. Identifier for delete mode.
_MODE_DELETE = 'delete'
# String. Identifier for download mode.
_MODE_DOWNLOAD = 'download'
# String. Identifier for custom run mode.
_MODE_RUN = 'run'
# String. Identifier for upload mode.
_MODE_UPLOAD = 'upload'
# List of all modes.
_MODES = [_MODE_DELETE, _MODE_DOWNLOAD, _MODE_RUN, _MODE_UPLOAD]
# List of modes where --force_overwrite is supported:
_FORCE_OVERWRITE_MODES = [_MODE_DOWNLOAD, _MODE_UPLOAD]
# Int. The number of times to retry remote_api calls.
_RETRIES = 3
# String. Identifier for type corresponding to course definition data.
_TYPE_COURSE = 'course'
# String. Identifier for type corresponding to datastore entities.
_TYPE_DATASTORE = 'datastore'
# Number of items upon which to emit upload rate statistics.
_UPLOAD_CHUNK_SIZE = 1000
# We support .zip files as one archive format.
ARCHIVE_TYPE_ZIP = 'zip'
# We support plain UNIX directory structure as an archive format.
ARCHIVE_TYPE_DIRECTORY = 'directory'
# The list of all supported archive formats.
_ARCHIVE_TYPES = [
    ARCHIVE_TYPE_ZIP,
    ARCHIVE_TYPE_DIRECTORY,
]
# Name of flag used to gate access to less-generally-useful features.
INTERNAL_FLAG_NAME = '--internal'
def create_args_parser():
    """Creates the command-line argument parser shared by all etl.py modes.

    Returns:
        argparse.ArgumentParser. Parser holding the positional arguments
        (mode, type, course_url_prefix, application_id, server) and every
        optional flag available without --internal.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'mode', choices=_MODES,
        help='Indicates the kind of operation we are performing', type=str)
    parser.add_argument(
        'type',
        help=(
            'Type of entity to process. If mode is %s or %s, should be one of '
            '%s or %s. If mode is %s, should be an importable dotted path to '
            'your etl_lib.Job subclass') % (
                _MODE_DOWNLOAD, _MODE_UPLOAD, _TYPE_COURSE, _TYPE_DATASTORE,
                _MODE_RUN),
        type=str)
    parser.add_argument(
        'course_url_prefix',
        help=(
            # Bug fix: this help text previously lacked the closing paren.
            "URL prefix of the course you want to download (e.g. '/foo' in "
            "'course:/foo:/directory:namespace')"), type=str)
    parser.add_argument(
        'application_id',
        help="The id of the application to read from (e.g. 'myapp')", type=str)
    parser.add_argument(
        'server',
        help=(
            'The full name of the source application to read from (e.g. '
            'myapp.appspot.com)'), type=str)
    parser.add_argument(
        '--archive_path',
        help=(
            'Absolute path of the archive file to read or write; required if '
            'mode is %s or %s' % (_MODE_DOWNLOAD, _MODE_UPLOAD)), type=str)
    parser.add_argument(
        '--batch_size',
        help='Number of results to attempt to retrieve per batch',
        default=20, type=int)
    parser.add_argument(
        '--datastore_types', default=[],
        help=(
            'When type is "%s", comma-separated list of datastore model types '
            'to process; all models are processed by default' %
            _TYPE_DATASTORE),
        type=lambda s: s.split(','))
    parser.add_argument(
        '--exclude_types', default=[],
        help=(
            'When type is "%s", comma-separated list of datastore model types '
            'to exclude from processing' % _TYPE_DATASTORE),
        type=lambda s: s.split(','))
    parser.add_argument(
        '--disable_remote', action='store_true',
        help=(
            'If mode is %s, pass this flag to skip authentication and remote '
            'environment setup. Should only pass for jobs that run entirely '
            'locally and do not require RPCs') % _MODE_RUN)
    parser.add_argument(
        '--force_overwrite', action='store_true',
        help=(
            # Bug fix: the two sentences ran together as 'permitted.If mode'
            # because the first string literal lacked a trailing space.
            'If mode is download, overwriting of local .zip files is '
            'permitted. If mode is upload, forces overwrite of entities '
            'on the target system that are also present in the archive. Note '
            'that this operation is dangerous and may result in data loss.'))
    parser.add_argument(
        '--resume', action='store_true',
        help=(
            'On upload, setting this flag indicates that you are starting or '
            'resuming an upload. Only use this flag when you are uploading '
            'to a course that had no data prior to starting this upload. This '
            'flag assumes that the only data present is that provided by the '
            'upload. This permits significant time savings if an upload is '
            'interrupted or otherwise needs to be performed in multiple '
            'stages.'))
    parser.add_argument(
        '--job_args', default=[],
        help=(
            'If mode is %s, string containing args delegated to etl_lib.Job '
            'subclass') % _MODE_RUN, type=lambda s: s.split())
    parser.add_argument(
        '--log_level', choices=_LOG_LEVEL_CHOICES,
        help='Level of logging messages to emit', default='INFO',
        type=lambda s: s.upper())
    parser.add_argument(
        '--privacy', action='store_true',
        help=(
            "When mode is '%s' and type is '%s', passing this flag will strip "
            "or obfuscate information that can identify a single user" % (
                _MODE_DOWNLOAD, _TYPE_DATASTORE)))
    parser.add_argument(
        '--privacy_secret',
        help=(
            "When mode is '%s', type is '%s', and --privacy is passed, pass "
            "this secret to have user ids transformed with it rather than "
            "with random bits") % (_MODE_DOWNLOAD, _TYPE_DATASTORE), type=str)
    parser.add_argument(
        '--verbose', action='store_true',
        help='Tell about each item uploaded/downloaded.')
    parser.add_argument(
        INTERNAL_FLAG_NAME, action='store_true',
        help=('Enable control flags needed only by developers. '
              'Use %s --help to see documentation on these extra flags.' %
              INTERNAL_FLAG_NAME))
    return parser
def add_internal_args_support(parser):
    """Adds developer-only flags to an argument parser.

    Public so that functional tests and developer utilities can enable the
    internal-only features directly from Python code.

    Args:
        parser: argparse.ArgumentParser. The parser to extend in place.
    """
    add_argument = parser.add_argument
    add_argument(
        '--archive_type', default='zip', choices=_ARCHIVE_TYPES,
        help=(
            'By default, uploads and downloads are done using a single .zip '
            'for the archived form of the data. This is convenient, as only '
            'that single file needs to be retained and protected. When making '
            'functional tests that depend on a constellation of several entity '
            'types in complex relationships, it is often much more convenient '
            'to create the entities by direct interaction with CourseBuilder, '
            'rather than writing code to achieve the same effect. Saving this '
            'data out for later use in unit tests is easily accomplished via '
            'ETL. However, as code changes, modifications to the stored test '
            'values is occasionally necessary. Rather than store the test '
            'values as a monolithic opaque binary blob (hard to edit), '
            'one may specify --archive_type=directory. This treats the '
            '--archive_path argument as a directory, and stores individual '
            'files in that directory.'))
    add_argument(
        '--no_static_files', action='store_true',
        help=(
            'Do not upload/download static file content, except for special '
            'files %s and %s containing the course. Useful for saving space '
            'when generating test-case data.' % (
                _COURSE_YAML_PATH_SUFFIX, _COURSE_JSON_PATH_SUFFIX)))
def create_configured_args_parser(argv):
    """Builds the CLI parser, enabling internal-only flags when requested.

    Args:
        argv: list of str. Raw command-line arguments.

    Returns:
        argparse.ArgumentParser. The standard parser, extended with
        developer-only flags iff the internal flag appears anywhere in argv.
    """
    parser = create_args_parser()
    wants_internal = INTERNAL_FLAG_NAME in argv
    if wants_internal:
        add_internal_args_support(parser)
    return parser
def _init_archive(path, archive_type=ARCHIVE_TYPE_ZIP):
    """Creates the archive implementation matching archive_type.

    Args:
        path: string. Path of the archive (file or directory).
        archive_type: string. One of ARCHIVE_TYPE_ZIP, ARCHIVE_TYPE_DIRECTORY.

    Returns:
        A _ZipArchive or _DirectoryArchive instance.

    Raises:
        ValueError: if archive_type is not a recognized archive format.
    """
    factories = {
        ARCHIVE_TYPE_ZIP: _ZipArchive,
        ARCHIVE_TYPE_DIRECTORY: _DirectoryArchive,
    }
    try:
        factory = factories[archive_type]
    except KeyError:
        raise ValueError('Archive type "%s" not one of "zip", "directory".' %
                         archive_type)
    return factory(path)
class _AbstractArchive(object):
    """Manager for local archives of Course Builder data.

    The internal format of the archive may change from version to version;
    users must not depend on it.

    Archives contain assets and data from a single course, along with a
    manifest detailing the course's raw definition string, the version of
    Course Builder the course is compatible with, and the list of course
    files contained within the archive.

    # TODO(johncox): possibly obfuscate this archive so it cannot be unzipped
    # outside etl.py. Add a command-line flag for creating a zip instead. For
    # uploads, require an obfuscated archive, not a zip.
    """
    def __init__(self, path):
        """Constructs a new archive.

        Args:
            path: string. Absolute path where the archive will be written.
        """
        self._path = path
    @classmethod
    def get_external_path(cls, internal_path, prefix=_ARCHIVE_PATH_PREFIX):
        """Gets external path string from results of cls.get_internal_path."""
        marker = prefix + os.sep
        assert internal_path.startswith(marker)
        return internal_path.split(marker)[1]
    @classmethod
    def get_internal_path(cls, external_path, prefix=_ARCHIVE_PATH_PREFIX):
        """Gets the path string used inside the archive for an asset.

        All assets (meaning all archive contents except the manifest file)
        must have their paths generated this way, and those paths must be
        re-translated to external paths via cls.get_external_path before use
        with systems external to the archive file.

        Args:
            external_path: string. Path to generate an internal archive path
                from.
            prefix: string. Prefix to base the path on.

        Returns:
            String. Internal archive path.
        """
        assert not external_path.startswith(prefix)
        return os.path.join(prefix, _remove_bundle_root(external_path))
    def add(self, filename, contents):
        """Adds contents to the archive under the path filename.

        Args:
            filename: string. Path of the contents to add.
            contents: bytes. Contents to add.
        """
        raise NotImplementedError()
    def add_local_file(self, local_filename, internal_filename):
        """Adds a file from local disk to the archive.

        Args:
            local_filename: string. Path on disk of file to add.
            internal_filename: string. Internal archive path to write to.
        """
        raise NotImplementedError()
    def close(self):
        """Closes archive and tests integrity; must close before read."""
        raise NotImplementedError()
    def get(self, path):
        """Returns the raw bytes of the entry at path, or None if absent.

        Args:
            path: string. Path of file to retrieve from the archive.

        Returns:
            Bytes of file contents.
        """
        raise NotImplementedError()
    def open(self, mode):
        """Opens archive in the mode given by mode string ('r', 'w', 'a')."""
        raise NotImplementedError()
    @property
    def manifest(self):
        """Returns the archive's manifest."""
        return _Manifest.from_json(self.get(_MANIFEST_FILENAME))
    @property
    def path(self):
        """Returns the path the archive was constructed with."""
        return self._path
class _ZipArchive(_AbstractArchive):
    """Archive implementation backed by a single .zip file."""
    def __init__(self, path):
        super(_ZipArchive, self).__init__(path)
        # Lazily created by open(); all other methods require it to exist.
        self._zipfile = None
    def add(self, filename, contents):
        """Writes contents into the zip under the given internal path."""
        self._zipfile.writestr(filename, contents)
    def add_local_file(self, local_filename, internal_filename):
        """Copies a file from local disk into the zip.

        Args:
            local_filename: string. Path on disk of file to add.
            internal_filename: string. Internal archive path to write to.
        """
        self._zipfile.write(local_filename, arcname=internal_filename)
    def close(self):
        """Verifies zip integrity and closes it; must close before read."""
        self._zipfile.testzip()
        self._zipfile.close()
    def get(self, path):
        """Returns the raw bytes stored at path, or None when absent.

        Args:
            path: string. Path of file to retrieve from the archive.

        Returns:
            Bytes of file contents, or None if path is not in the archive.
        """
        assert self._zipfile
        try:
            return self._zipfile.read(path)
        except KeyError:
            return None
    def open(self, mode):
        """Opens archive in the mode given by mode string ('r', 'w', 'a')."""
        assert not self._zipfile
        self._zipfile = zipfile.ZipFile(self._path, mode, allowZip64=True)
class _DirectoryArchive(_AbstractArchive):
    """Archive implementation that stores entries as plain files on disk."""
    def _ensure_directory(self, filename):
        """Creates any missing parent directories for filename."""
        parent = os.path.join(self.path, os.path.dirname(filename))
        if not os.path.exists(parent):
            os.makedirs(parent)
    def add(self, filename, contents):
        """Writes contents to a file named filename under the archive root."""
        self._ensure_directory(filename)
        target = os.path.join(self.path, filename)
        with open(target, 'wb') as fp:
            fp.write(contents)
    def add_local_file(self, local_filename, filename):
        """Copies the file at local_filename into the archive directory."""
        self._ensure_directory(filename)
        shutil.copyfile(local_filename, os.path.join(self.path, filename))
    def close(self):
        """No-op; directory entries are complete as soon as they're written."""
        pass
    def get(self, filename):
        """Returns the raw bytes of the archive entry named filename."""
        with open(os.path.join(self.path, filename), 'rb') as fp:
            return fp.read()
    def open(self, mode):
        """For write modes, creates the archive root directory as needed."""
        if mode in ('w', 'a'):
            if not os.path.exists(self.path):
                os.makedirs(self.path)
            elif not os.path.isdir(self.path):
                raise ValueError('"%s" is not a directory.' % self.path)
class _Manifest(object):
    """Manifest that lists the contents and version of an archive folder."""
    def __init__(self, raw, version):
        """Constructs a new manifest.

        Args:
            raw: string. Raw course definition string.
            version: string. Version of Course Builder course this manifest
                was generated from.
        """
        self._entities = []
        self._raw = raw
        self._version = version
    @classmethod
    def from_json(cls, json):
        """Returns a manifest deserialized from the given JSON string."""
        parsed = transforms.loads(json)
        instance = cls(parsed['raw'], parsed['version'])
        for entity in parsed['entities']:
            instance.add(_ManifestEntity(entity['path'], entity['is_draft']))
        return instance
    def add(self, entity):
        """Records a _ManifestEntity in the manifest."""
        self._entities.append(entity)
    def get(self, path):
        """Gets entity by path string; returns None if not found."""
        for entity in self._entities:
            if entity.path == path:
                return entity
        return None
    @property
    def entities(self):
        """Returns the recorded entities, sorted by path."""
        return sorted(self._entities, key=lambda e: e.path)
    @property
    def raw(self):
        return self._raw
    @property
    def version(self):
        return self._version
    def __str__(self):
        """Returns JSON representation of the manifest."""
        manifest = {
            'entities': [e.__dict__ for e in self.entities],
            'raw': self.raw,
            'version': self.version,
        }
        # Fix: sort_keys was the typo'd (but truthy) literal 2; True is the
        # intended value and behaves identically.
        return transforms.dumps(manifest, indent=2, sort_keys=True)
class _ManifestEntity(object):
    """Record describing one file entry in a manifest."""
    def __init__(self, path, is_draft):
        """Creates an entry.

        Args:
            path: string. Internal archive path of the entity.
            is_draft: bool. Whether the entity is in draft state.
        """
        self.path = path
        self.is_draft = is_draft
class _ReadWrapper(object):
    """Wraps raw bytes in a minimal file-like object exposing read()."""
    def __init__(self, data):
        """Stores the payload for later retrieval.

        Args:
            data: bytes. The bytes to return on read().
        """
        self._data = data
    def read(self):
        """Returns the wrapped bytes in full."""
        return self._data
def _confirm_delete_datastore_or_die(kind_names, namespace, title):
    """Prompts for confirmation; aborts the program unless it is given.

    Args:
        kind_names: list of string. Datastore kinds slated for deletion.
        namespace: string. Namespace of the course being deleted from.
        title: string. Title of the course being deleted from.
    """
    prompt = (
        'You are about to delete all entities of the kinds "%(kinds)s" from '
        'the course %(title)s in namespace %(namespace)s.%(linebreak)sYou are '
        'also about to flush all caches for all courses on your production '
        'instance.%(linebreak)sYou cannot undo this operation.%(linebreak)sTo '
        'confirm, type "%(confirmation_message)s": ') % {
            'confirmation_message': _DELETE_DATASTORE_CONFIRMATION_INPUT,
            'kinds': ', '.join(kind_names),
            'linebreak': os.linesep,
            'namespace': namespace,
            'title': title,
        }
    if _raw_input(prompt) != _DELETE_DATASTORE_CONFIRMATION_INPUT:
        _die('Delete not confirmed. Aborting')
def _delete(params):
    """Dispatches a delete operation based on params.type.

    Args:
        params: argparse.Namespace. Parsed command-line arguments.
    """
    context = _get_context_or_die(params.course_url_prefix)
    namespace = context.get_namespace_name()
    with common_utils.Namespace(namespace):
        if params.type == _TYPE_COURSE:
            _delete_course()
        elif params.type == _TYPE_DATASTORE:
            _delete_datastore(context, params.batch_size)
def _delete_course():
    """Stub for a possible future course deleter; always raises."""
    # Deleting the course itself (as opposed to its datastore entities) is
    # intentionally unsupported for now.
    raise NotImplementedError
def _delete_datastore(context, batch_size):
    """Deletes all datastore entities for a course after user confirmation.

    Args:
        context: the course's application context, used for its namespace
            and title in the confirmation prompt.
        batch_size: int. Number of entities to process per batch.
    """
    kind_names = _get_datastore_kinds()
    _confirm_delete_datastore_or_die(
        kind_names, context.get_namespace_name(), context.get_title())
    # Resolve every model class up front so an import error cannot strike
    # after some delete RPCs have already been issued.
    model_classes = []
    for kind_name in kind_names:
        model_classes.append(db.class_for_kind(kind_name))
    _LOG.info('Beginning datastore delete')
    for model_class in model_classes:
        _LOG.info('Deleting entities of kind %s', model_class.kind())
        _process_models(model_class, batch_size, delete=True)
    _LOG.info('Flushing all caches')
    memcache.flush_all()
    _LOG.info('Done')
def _die(message, with_trace=False):
    """Logs message at critical level and exits the process with status 1.

    Args:
        message: string. The message to log.
        with_trace: bool. When True, appends the most recent exception's
            class, message, and formatted stack trace to the output.
    """
    if with_trace:
        exc_class, exc_message, exc_tb = sys.exc_info()
        message = '%s%s%s%s%s%s%s' % (
            message, os.linesep,
            exc_class, os.linesep,
            exc_message, os.linesep,
            ''.join(traceback.format_tb(exc_tb)))
    _LOG.critical(message)
    sys.exit(1)
def _download(params):
    """Validates params and dispatches to the matching download method.

    Args:
        params: argparse.Namespace. Parsed command-line arguments.
    """
    handlers = {
        _TYPE_COURSE: _download_course,
        _TYPE_DATASTORE: _download_datastore,
    }
    archive_path = os.path.abspath(params.archive_path)
    context = _get_context_or_die(params.course_url_prefix)
    course = etl_lib.get_course(context)
    with common_utils.Namespace(context.get_namespace_name()):
        handler = handlers.get(params.type)
        if handler:
            handler(context, course, archive_path, params)
def _download_course(context, course, archive_path, params):
    """Downloads course files, assets, and course entities into an archive.

    Args:
        context: the course's application context; supplies the raw course
            definition string and filesystem access.
        course: the course object; supplies the course version.
        archive_path: string. Absolute path of the archive to write.
        params: argparse.Namespace. Parsed command-line arguments.
    """
    # Courses authored before 1.3 are not supported by this exporter.
    if course.version < courses.COURSE_MODEL_VERSION_1_3:
        _die(
            'Cannot export course made with Course Builder version < %s' % (
                courses.COURSE_MODEL_VERSION_1_3))
    # 'archive_type' is only present on params when --internal flags are
    # enabled, hence vars().get() with a default.
    archive = _init_archive(archive_path,
                            vars(params).get('archive_type', ARCHIVE_TYPE_ZIP))
    archive.open('w')
    manifest = _Manifest(context.raw, course.version)
    _LOG.info('Processing course with URL prefix ' + params.course_url_prefix)
    # Split the course's files into those backed by the datastore and those
    # that live only on the local filesystem (including inherited files).
    datastore_files = set(_list_all(context))
    all_files = set(_filter_filesystem_files(_list_all(
        context, include_inherited=True)))
    filesystem_files = all_files - datastore_files
    if vars(params).get('no_static_files', False):
        # With --no_static_files, keep only course.json and course.yaml.
        # pylint: disable=protected-access
        always_allowed_files = set([
            context.fs.impl._physical_to_logical(_COURSE_JSON_PATH_SUFFIX),
            context.fs.impl._physical_to_logical(_COURSE_YAML_PATH_SUFFIX)])
        filesystem_files.intersection_update(always_allowed_files)
        datastore_files.intersection_update(always_allowed_files)
    _LOG.info('Adding files from datastore')
    for external_path in datastore_files:
        internal_path = _AbstractArchive.get_internal_path(external_path)
        if params.verbose:
            _LOG.info('Adding ' + internal_path)
        stream = _get_stream(context, external_path)
        # Preserve draft state in the manifest when the stream metadata
        # carries it; default to published otherwise.
        is_draft = False
        if stream.metadata and hasattr(stream.metadata, 'is_draft'):
            is_draft = stream.metadata.is_draft
        entity = _ManifestEntity(internal_path, is_draft)
        archive.add(internal_path, stream.read())
        manifest.add(entity)
    _LOG.info('Adding files from filesystem')
    for external_path in filesystem_files:
        with open(external_path) as f:
            internal_path = _AbstractArchive.get_internal_path(external_path)
            if params.verbose:
                _LOG.info('Adding ' + internal_path)
            # Filesystem files are never drafts.
            archive.add(internal_path, f.read())
            manifest.add(_ManifestEntity(internal_path, False))
    _LOG.info('Adding dependencies from datastore')
    # Course content entities (plus any extra entities needed for course
    # import) are exported with the identity transform: no privacy
    # obfuscation is applied on a course download.
    all_entities = list(courses.COURSE_CONTENT_ENTITIES) + list(
        courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT)
    for found_type in all_entities:
        _download_type(
            archive, manifest, found_type.__name__, params.batch_size,
            _IDENTITY_TRANSFORM)
    _finalize_download(archive, manifest)
def _download_datastore(context, course, archive_path, params):
    """Downloads datastore entities for a course into an archive.

    Args:
        context: the course's application context.
        course: the course object; supplies the course version.
        archive_path: string. Absolute path of the archive to write.
        params: argparse.Namespace. Parsed command-line arguments.
    """
    available_types = set(_get_datastore_kinds())
    # With no explicit --datastore_types, process everything available.
    requested_names = params.datastore_types or available_types
    requested_types = (
        set(requested_names) - set(params.exclude_types) - set(_EXCLUDE_TYPES))
    missing_types = requested_types - available_types
    if missing_types:
        _die(
            'Requested types not found: %s%sAvailable types are: %s' % (
                ', '.join(missing_types), os.linesep,
                ', '.join(available_types)))
    privacy_secret = _get_privacy_secret(params.privacy_secret)
    privacy_transform_fn = _get_privacy_transform_fn(
        params.privacy, privacy_secret)
    archive = _init_archive(
        archive_path, vars(params).get('archive_type', ARCHIVE_TYPE_ZIP))
    archive.open('w')
    manifest = _Manifest(context.raw, course.version)
    for kind in requested_types & available_types:
        _download_type(
            archive, manifest, kind, params.batch_size, privacy_transform_fn)
    _finalize_download(archive, manifest)
def _download_type(
        archive, manifest, model_class, batch_size, privacy_transform_fn):
    """Downloads a set of files and adds them to the archive.

    Entities are spooled to a temporary .json file next to the archive,
    copied into the archive under the models/ prefix, then deleted.

    Args:
        archive: archive object open for writing.
        manifest: _Manifest. Records what was placed in the archive.
        model_class: string. Datastore kind name to download.
        batch_size: int. Number of entities fetched per datastore batch.
        privacy_transform_fn: function applied to each model for privacy
            scrubbing (the identity transform when --privacy is not set).
    """
    json_path = os.path.join(
        os.path.dirname(archive.path), '%s.json' % model_class)
    _LOG.info(
        'Adding entities of type %s to temporary file %s',
        model_class, json_path)
    json_file = transforms.JsonFile(json_path)
    json_file.open('w')
    # Bind the output file and transform so _process_models only needs to
    # supply the model.
    model_map_fn = functools.partial(
        _write_model_to_json_file, json_file, privacy_transform_fn)
    _process_models(
        db.class_for_kind(model_class), batch_size,
        model_map_fn=model_map_fn)
    json_file.close()
    internal_path = _AbstractArchive.get_internal_path(
        os.path.basename(json_file.name), prefix=_ARCHIVE_PATH_PREFIX_MODELS)
    _LOG.info('Adding %s to archive', internal_path)
    archive.add_local_file(json_file.name, internal_path)
    manifest.add(_ManifestEntity(internal_path, False))
    _LOG.info('Removing temporary file ' + json_file.name)
    os.remove(json_file.name)
def _filter_filesystem_files(files):
    """Filters out unnecessary files from a local filesystem.

    If we just read from disk, we'll pick up and archive lots of files that we
    don't need to upload later, plus non-userland code that on reupload will
    shadow the system versions (views, assets/lib, etc.).

    Args:
        files: list of string. Absolute file paths.

    Returns:
        List of string. Absolute filepaths we want to archive.
    """
    filtered_files = []
    for path in files:
        relative_name = _remove_bundle_root(path)
        # Generator form (instead of a list inside any()) lets any()
        # short-circuit on the first matching exclude prefix.
        not_in_excludes = not any(
            relative_name.startswith(e) for e in _LOCAL_WHITELIST_EXCLUDES)
        head_directory = relative_name.split(os.path.sep)[0]
        if not_in_excludes and head_directory in _LOCAL_WHITELIST:
            filtered_files.append(path)
    return filtered_files
def _finalize_download(archive, manifest):
    """Writes the manifest into the archive and closes it out."""
    _LOG.info('Adding manifest')
    archive.add(_MANIFEST_FILENAME, str(manifest))
    archive.close()
    _LOG.info('Done; archive saved to ' + archive.path)
def _force_config_reload():
    """Forces a fresh fetch of all config properties from the datastore."""
    # For some reason config properties aren't being automatically pulled from
    # the datastore with the remote environment. Force an update of all of them.
    config.Registry.get_overrides(force_update=True)
def _get_context_or_die(course_url_prefix):
    """Returns the course's application context, dying if none is found."""
    found = etl_lib.get_context(course_url_prefix)
    if found:
        return found
    # _die terminates the process, so we never fall through.
    _die('No course found with course_url_prefix %s' % course_url_prefix)
def _get_privacy_transform_fn(privacy, privacy_secret):
    """Returns a transform function to use for export.

    When privacy is requested, values are scrubbed with an HMAC-SHA-2-256
    keyed on the secret; otherwise the identity transform is returned.
    """
    assert privacy_secret is not None
    if privacy:
        return functools.partial(
            crypto.hmac_sha_2_256_transform, privacy_secret)
    return _IDENTITY_TRANSFORM
def _get_privacy_secret(privacy_secret):
"""Gets the passed privacy secret (or 128 random bits if None)."""
secret = privacy_secret
if secret is None:
secret = random.getrandbits(128)
return secret
def _set_env_vars_from_app_yaml():
    """Read and set environment from app.yaml.

    This is to set up the GCB_REGISTERED_MODULES and
    GCB_REGISTERED_MODULES_CUSTOM vars so that main's call to
    appengine_config.import_and_enable_modules() will work properly.
    """
    # Imports are local so module load does not require the GAE SDK on path.
    from google.appengine.api import appinfo_includes
    import appengine_config  # pylint: disable=redefined-outer-name
    # COURSEBUILDER_HOME overrides the default bundle root when set.
    cb_home = os.environ.get(
        'COURSEBUILDER_HOME', appengine_config.BUNDLE_ROOT)
    # The second argument is the open function Parse uses for includes.
    app_yaml = appinfo_includes.Parse(
        open(os.path.join(cb_home, 'app.yaml')), open)
    for name, value in app_yaml.env_variables.items():
        os.environ[name] = value
def _import_entity_modules():
    """Import all entity type classes.

    We need to import main.py to make sure all known entity types are imported
    by the time the ETL code runs. If a transitive closure of main.py imports
    does not import all required classes, import them here explicitly.
    """
    # pylint: disable=global-variable-not-assigned,
    # pylint: disable=redefined-outer-name,unused-variable
    try:
        import main
    # 'except ImportError, e' is deprecated comma syntax and the bound
    # exception was never used; drop the binding entirely.
    except ImportError:
        _die((
            'Unable to import required modules; see tools/etl/etl.py for '
            'docs.'), with_trace=True)
def _import_modules_into_global_scope():
    """Import helper; run after _set_up_sys_path() for imports to resolve."""
    # pylint: disable=global-variable-not-assigned,
    # pylint: disable=redefined-outer-name,unused-variable
    global appengine_config
    global memcache
    global db
    global entities
    global entity_transforms
    global metadata
    global common_utils
    global config
    global courses
    global crypto
    global models
    global sites
    global transforms
    global vfs
    global etl_lib
    global remote
    try:
        import appengine_config
        from google.appengine.api import memcache
        from google.appengine.ext import db
        from google.appengine.ext.db import metadata
        from common import crypto
        from common import utils as common_utils
        from models import config
        from controllers import sites
        from models import courses
        from models import entities
        from models import entity_transforms
        from models import models
        from models import transforms
        from models import vfs
        from tools.etl import etl_lib
        from tools.etl import remote
    # 'except ImportError, e' is deprecated comma syntax and the bound
    # exception was never used; drop the binding entirely.
    except ImportError:
        _die((
            'Unable to import required modules; see tools/etl/etl.py for '
            'docs.'), with_trace=True)
def _remove_bundle_root(path):
    """Removes BUNDLE_ROOT prefix from a path."""
    if path.startswith(appengine_config.BUNDLE_ROOT):
        # Slice off the prefix. The previous str.split(BUNDLE_ROOT)[1]
        # would return only the text between the first and second
        # occurrences if BUNDLE_ROOT happened to appear twice in the path.
        path = path[len(appengine_config.BUNDLE_ROOT):]
    # Path must not start with path separator so it is os.path.join()able.
    if path.startswith(os.path.sep):
        path = path[1:]
    return path
def _retry(message=None, times=_RETRIES):
    """Returns a decorator that automatically retries functions on error.

    Args:
        message: string or None. The optional message to log on retry.
        times: int. Number of times to retry.

    Returns:
        Function wrapper.
    """
    assert times > 0

    def decorator(fn):
        """Real decorator."""

        # functools.wraps preserves the wrapped function's name/docstring
        # (the original wrapper masked them).
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            failures = 0
            while failures < times:
                try:
                    return fn(*args, **kwargs)
                # We can't be more specific by default.
                # pylint: disable=broad-except
                except Exception:
                    if message:
                        _LOG.info(message)
                    failures += 1
                    if failures == times:
                        traceback.print_exc()  # Show origin of failure
                        # Bare raise re-raises with the ORIGINAL traceback;
                        # 'raise e' would reset it in Python 2.
                        raise
        return wrapped
    return decorator
@_retry(message='Checking if the specified course is empty failed; retrying')
def _context_is_for_empty_course(context):
    """True if course is entirely empty or contains only a course.yaml."""
    current_course_files = context.fs.impl.list(
        appengine_config.BUNDLE_ROOT)
    # The one-file layout a freshly-created course is expected to have.
    empty_course_files = [os.path.join(
        appengine_config.BUNDLE_ROOT, _COURSE_YAML_PATH_SUFFIX)]
    return (
        (not current_course_files) or
        current_course_files == empty_course_files)
@_retry(message='Getting list of datastore_types failed; retrying')
def _get_datastore_kinds():
    """Lists datastore kind names, excluding __internal_appengine_names__."""
    kinds = []
    for kind in metadata.get_kinds():
        if not _INTERNAL_DATASTORE_KIND_REGEX.match(kind):
            kinds.append(kind)
    return kinds
@_retry(message='Getting contents for entity failed; retrying')
def _get_stream(context, path):
    """Fetches the file stream at path via the course's filesystem impl."""
    return context.fs.impl.get(path)
@_retry(message='Fetching asset list failed; retrying')
def _list_all(context, include_inherited=False):
    """Lists all files of the course under BUNDLE_ROOT."""
    return context.fs.impl.list(
        appengine_config.BUNDLE_ROOT, include_inherited=include_inherited)
def _process_models(model_class, batch_size, delete=False, model_map_fn=None):
    """Fetch all rows in batches."""
    # Caller must ask for at least one action (deletion and/or mapping).
    assert (delete or model_map_fn) or (not delete and model_map_fn)
    report_every = batch_size * 10
    total_count = 0
    cursor = None
    while True:
        batch_count, cursor = _process_models_batch(
            model_class, cursor, batch_size, delete, model_map_fn)
        # An empty batch or exhausted cursor means we are done.
        if not (batch_count and cursor):
            break
        total_count += batch_count
        if total_count % report_every == 0:
            _LOG.info('Processed records: %s', total_count)
@_retry(message='Processing datastore entity batch failed; retrying')
def _process_models_batch(
        model_class, cursor, batch_size, delete, model_map_fn):
    """Processes or deletes models in batches."""
    # A keys-only query is enough when we only delete.
    query = model_class.all(keys_only=delete)
    if cursor:
        query.with_cursor(start_cursor=cursor)
    results = query.fetch(limit=batch_size)
    count = 0
    next_cursor = None
    if results:
        if delete:
            db.delete(results)
            count = len(results)
        else:
            for model in results:
                model_map_fn(model)
            count = len(results)
        # Only advance the cursor for a non-empty batch; an empty batch
        # (cursor left as None) signals the end of iteration to the caller.
        next_cursor = query.cursor()
    return count, next_cursor
def _get_entity_dict(model, privacy_transform_fn):
    """Converts a model to a serializable dict, applying privacy scrubbing.

    The key is transformed first via safe_key(); when a real (non-identity)
    transform is in effect the model itself is also scrubbed via for_export().
    The transformed key's name and id are stored under 'key.name'/'key.id'
    so the entity can be reconstructed on upload.
    """
    key = model.safe_key(model.key(), privacy_transform_fn)
    if privacy_transform_fn is not _IDENTITY_TRANSFORM:
        model = model.for_export(privacy_transform_fn)
    entity_dict = transforms.entity_to_dict(model, force_utf_8_encoding=True)
    entity_dict['key.name'] = unicode(key.name())
    entity_dict['key.id'] = key.id()
    return entity_dict
@_retry(message='Upload failed; retrying')
def _put(context, content, path, is_draft, force_overwrite, verbose):
    """Writes one file into the course filesystem, honoring overwrite rules.

    Args:
        context: sites.ApplicationContext. Target course context.
        content: object with read(). Source of the file bytes.
        path: string. Course-relative path of the file.
        is_draft: bool. Draft flag to store with the file.
        force_overwrite: bool. Replace an existing file if True.
        verbose: bool. Emit per-file log lines if True.
    """
    path = os.path.join(appengine_config.BUNDLE_ROOT, path)
    description = _remove_bundle_root(path)
    do_put = False
    # course.yaml is never deleted/replaced via this path.
    if context.fs.impl.isfile(path) and not path.endswith('/course.yaml'):
        if force_overwrite:
            _LOG.info('Overriding file %s', description)
            context.fs.impl.delete(path)
            do_put = True
        elif verbose:
            _LOG.info('Not replacing existing file %s', description)
    else:
        do_put = True
        if verbose:
            _LOG.info('Uploading file %s', description)
    if do_put:
        # 'path' was already rooted at BUNDLE_ROOT above; the original
        # re-joined it with BUNDLE_ROOT, a no-op for absolute paths.
        context.fs.impl.non_transactional_put(
            path, content.read(), is_draft=is_draft)
def _raw_input(message):
    """raw_input wrapper scoped to the module for swapping during tests."""
    # Indirection exists solely so tests can monkeypatch prompting.
    return raw_input(message)
def _run_custom(parsed_args):
    """Runs desired command."""
    try:
        # parsed_args.type is a dotted path: 'package.module.JobClass'.
        module_path, class_name = parsed_args.type.rsplit('.', 1)
        job_module = __import__(
            module_path, globals(), locals(), [class_name])
        job_class = getattr(job_module, class_name)
        assert issubclass(job_class, etl_lib.Job)
        job = job_class(parsed_args)
    except:  # Any error means death. pylint: disable=bare-except
        _die(
            'Unable to import and instantiate %s, or not of type %s' % (
                parsed_args.type, etl_lib.Job.__name__),
            with_trace=True)
    job.run()
    _LOG.info('Completed job %s', job_class.__name__)
def _upload(params):
    """Dispatches an upload to the course and/or datastore handlers.

    Args:
        params: argparse.Namespace. Parsed command-line arguments.
    """
    _LOG.info('Processing course with URL prefix %s from archive path %s',
              params.course_url_prefix, params.archive_path)
    context = _get_context_or_die(params.course_url_prefix)
    all_entities = list(courses.COURSE_CONTENT_ENTITIES) + list(
        courses.ADDITIONAL_ENTITIES_FOR_COURSE_IMPORT)
    # All datastore work must run inside the course's namespace.
    with common_utils.Namespace(context.get_namespace_name()):
        if params.type == _TYPE_COURSE:
            # A course upload also restores the course-content entity kinds.
            _upload_course(context, params)
            type_names = [x.__name__ for x in all_entities]
            _upload_datastore(params, type_names)
        elif params.type == _TYPE_DATASTORE:
            _upload_datastore(params, params.datastore_types)
    # Drop cached state so the freshly-uploaded content is visible.
    sites.ApplicationContext.clear_per_process_cache()
def _can_upload_entity_to_course(entity):
    """Checks if a file can be uploaded to course."""
    directory, filename = os.path.split(entity.path)
    if directory != _ARCHIVE_PATH_PREFIX_MODELS:
        return True
    # Within the models/ prefix, only course.yaml may go to the course.
    return filename == _COURSE_YAML_PATH_SUFFIX
def _upload_course(context, params):
    """Uploads course data.

    Refuses to touch a non-empty course unless --force_overwrite is set,
    validates course.json and course.yaml from the archive before writing
    anything, then puts each eligible archived file into the course.

    Args:
        context: sites.ApplicationContext. Target course context.
        params: argparse.Namespace. Parsed command-line arguments.
    """
    if not _context_is_for_empty_course(context) and not params.force_overwrite:
        _die('Cannot upload to non-empty course with course_url_prefix %s. '
             'You can override this behavior via the --force_overwrite flag.' %
             params.course_url_prefix)
    archive = _init_archive(params.archive_path,
                            vars(params).get('archive_type', ARCHIVE_TYPE_ZIP))
    try:
        archive.open('r')
    except IOError:
        _die('Cannot open archive_path ' + params.archive_path)
    # Validate both course definition files up front so we fail before any
    # partial writes happen.
    course_json = archive.get(
        _AbstractArchive.get_internal_path(_COURSE_JSON_PATH_SUFFIX))
    if course_json:
        try:
            courses.PersistentCourse13().deserialize(course_json)
        except (AttributeError, ValueError):
            _die((
                'Cannot upload archive at %s containing malformed '
                'course.json') % params.archive_path)
    course_yaml = archive.get(
        _AbstractArchive.get_internal_path(_COURSE_YAML_PATH_SUFFIX))
    if course_yaml:
        try:
            yaml.safe_load(course_yaml)
        except Exception:  # pylint: disable=broad-except
            _die((
                'Cannot upload archive at %s containing malformed '
                'course.yaml') % params.archive_path)
    # With --no_static_files, everything except the two course definition
    # files is skipped.
    files_filter = set()
    if vars(params).get('no_static_files', False):
        files_filter.add(_COURSE_JSON_PATH_SUFFIX)
        files_filter.add(_COURSE_YAML_PATH_SUFFIX)
    _LOG.info('Uploading files')
    count = 0
    for entity in archive.manifest.entities:
        if not _can_upload_entity_to_course(entity):
            _LOG.info('Skipping file ' + entity.path)
            continue
        # NOTE(review): 'entity.path in files_filter' skips the course
        # definition files, i.e. only the two filter members pass through
        # when --no_static_files is set -- confirm intent.
        if files_filter and entity.path in files_filter and params.verbose:
            _LOG.info('Skipping file ' + entity.path +
                      ' due to --no_static_files')
            continue
        external_path = _AbstractArchive.get_external_path(entity.path)
        _put(
            context, _ReadWrapper(archive.get(entity.path)), external_path,
            entity.is_draft, params.force_overwrite, params.verbose)
        count += 1
    _LOG.info('Uploaded %d files.', count)
def _get_classes_for_type_names(type_names):
    """Resolves datastore kind names to entity classes, dying on problems.

    Args:
        type_names: iterable of string. Kind names to resolve.

    Returns:
        List of db.Model subclasses, one per resolvable name.
    """
    entity_classes = []
    any_problems = False
    for type_name in type_names:
        # TODO(johncox): Add class-method to troublesome types so they can be
        # regenerated from serialized ETL data.
        if type_name in ('Submission', 'Review'):
            any_problems = True
            _LOG.critical(
                'Cannot upload entities of type "%s". '
                'This type has a nontrivial constructor, and simply '
                'setting properties into the DB base object type is '
                'insufficient to correctly construct this type.', type_name)
            continue
        try:
            entity_class = db.class_for_kind(type_name)
            entity_classes.append(entity_class)
        except db.KindError:
            any_problems = True
            _LOG.critical(
                'Cannot upload entities of type "%s". '
                'The corresponding Python class for this entity type '
                'was not found in CourseBuilder. This indicates a '
                'substantial incompatiblity in versions; some or all '
                'functionality may be affected. Use the --exclude_types '
                'flag to skip entities of this type.', type_name)
    # All problems are reported before dying so the operator sees the
    # complete list at once.
    if any_problems:
        _die('Cannot proceed with upload in the face of these problems.')
    return entity_classes
def _determine_type_names(params, included_type_names, archive):
    """Reconciles requested include/exclude type names with archive contents.

    Args:
        params: argparse.Namespace. Parsed command-line arguments.
        included_type_names: iterable of string. Types requested for upload
            (empty means 'everything in the archive').
        archive: open archive whose manifest lists the available data files.

    Returns:
        Set of string. Type names that will actually be uploaded.
    """
    included_type_names = set(included_type_names)
    excluded_type_names = set(params.exclude_types)
    # Types present in the archive are the models/<Kind>.json entries.
    zipfile_type_names = set()
    for entity in archive.manifest.entities:
        head, tail = os.path.split(entity.path)
        if head == _ARCHIVE_PATH_PREFIX_MODELS:
            zipfile_type_names.add(tail.replace('.json', ''))
    if not zipfile_type_names:
        _die('No entity types to upload found in archive file "%s"' %
             params.archive_path)
    if not included_type_names:
        included_type_names = zipfile_type_names
    # Warn about names mentioned on the command line that the archive does
    # not contain, then clip both sets to what is actually available.
    for type_name in included_type_names - zipfile_type_names:
        _LOG.error('Included type "%s" not found in archive.', type_name)
    included_type_names &= zipfile_type_names
    for type_name in excluded_type_names - zipfile_type_names:
        _LOG.warning('Excluded type "%s" not found in archive.', type_name)
    excluded_type_names &= zipfile_type_names
    for type_name in excluded_type_names - included_type_names:
        _LOG.info('Redundant exclusion of type "%s" by mention in '
                  '--excluded_types and non-mention in '
                  'the --datastore_types list.', type_name)
    for type_name in included_type_names & excluded_type_names:
        _LOG.info('Excluding type "%s" from upload.', type_name)
    ret = included_type_names - excluded_type_names
    if not ret:
        _die('Command line flags specify that no entity types are '
             'eligible to be uploaded. Available types are: %s' %
             ' '.join(sorted(zipfile_type_names)))
    return ret
def _upload_datastore(params, included_type_names):
    """Uploads datastore entities from the archive's models/*.json files.

    Args:
        params: argparse.Namespace. Parsed command-line arguments.
        included_type_names: iterable of string. Kind names to upload.
    """
    archive = _init_archive(params.archive_path,
                            vars(params).get('archive_type', ARCHIVE_TYPE_ZIP))
    try:
        archive.open('r')
    except IOError:
        _die('Cannot open archive path ' + params.archive_path)
    type_names = _determine_type_names(params, included_type_names, archive)
    entity_classes = _get_classes_for_type_names(type_names)
    total_count = 0
    total_start = time.time()
    for entity_class in entity_classes:
        _LOG.info('-------------------------------------------------------')
        _LOG.info('Adding entities of type %s', entity_class.__name__)
        # Get JSON contents from .zip file
        json_path = _AbstractArchive.get_internal_path(
            '%s.json' % entity_class.__name__,
            prefix=_ARCHIVE_PATH_PREFIX_MODELS)
        _LOG.info('Fetching data from .zip archive')
        json_text = archive.get(json_path)
        if not json_text:
            # Tolerate a manifest/archive mismatch: skip rather than die.
            _LOG.info(
                'Unable to find data file %s for entity %s; skipping',
                json_path, entity_class.__name__)
            continue
        _LOG.info('Parsing data into JSON')
        json_object = transforms.loads(json_text)
        schema = (entity_transforms
                  .get_schema_for_entity(entity_class)
                  .get_json_schema_dict())
        total_count += _upload_entities_for_class(
            entity_class, schema, json_object['rows'], params)
    # Stale cached values could otherwise mask the new datastore content.
    _LOG.info('Flushing all caches')
    memcache.flush_all()
    total_end = time.time()
    _LOG.info(
        'Done; %s entit%s uploaded in %d seconds', total_count,
        'y' if total_count == 1 else 'ies', int(total_end - total_start))
def _upload_entities_for_class(entity_class, schema, entities, params):
    """Uploads all serialized entities of one class, with optional resume.

    With --resume, binary-searches the datastore for the first entity not
    yet present (assumes a previous run uploaded a strict prefix of the
    list), then re-checks one batch back for holes left by a partial batch.

    Args:
        entity_class: db.Model subclass being uploaded.
        schema: dict. JSON schema for the entity class.
        entities: list of dict. Serialized entities in upload order.
        params: argparse.Namespace. Parsed command-line arguments.

    Returns:
        Int. Number of entities uploaded by this call.
    """
    num_entities = len(entities)
    i = 0
    is_first_batch_after_resume = False
    # Binary search to find first un-uploaded entity.
    if params.resume:
        _LOG.info('Resuming upload; searching for first non-uploaded entry.')
        start = 0
        end = num_entities
        while start < end:
            guess = (start + end) / 2
            if params.verbose:
                _LOG.info('Checking whether instance %d exists', guess)
            key, _ = _get_entity_key(entity_class, entities[guess])
            if db.get(key):
                start = guess + 1
            else:
                end = guess
        i = start
        # If we are doing things in batches, it is possible that the previous
        # batch only partially completed. Experiments on a dev instance show
        # that partial writes do not proceed in the order the items are
        # supplied. I see no reason to trust that production will be any
        # friendlier. Check that there are no missed entities up to one
        # chunk back from where we are planning on restarting the upload.
        if params.batch_size > 1 and i > 0:
            start = max(0, i - params.batch_size)
            end = min(start + params.batch_size, len(entities))
            is_first_batch_after_resume = True
            existing = _find_existing_items(entity_class, entities, start, end)
            # db.get returns None for absent keys: a hole in the batch.
            if None in existing:
                if start > 0:
                    _LOG.info('Previous chunk only partially completed; '
                              'backing up from found location by one full '
                              'chunk just in case.')
                i = start
        if i < num_entities:
            _LOG.info('Resuming upload at item number %d of %d.', i,
                      num_entities)
        else:
            _LOG.info('All %d entities already uploaded; skipping.',
                      num_entities)
    # Proceed to end of entities (starting from 0 if not resuming)
    # pylint: disable=protected-access
    progress = etl_lib._ProgressReporter(
        _LOG, 'Uploaded', entity_class.__name__, _UPLOAD_CHUNK_SIZE,
        len(entities) - i)
    if i < num_entities:
        _LOG.info('Starting upload of entities')
        while i < num_entities:
            quantity = _upload_batch(entity_class, schema, entities, i,
                                     is_first_batch_after_resume, params)
            progress.count(quantity)
            i += quantity
            is_first_batch_after_resume = False
        progress.report()
    _LOG.info('Upload of %s complete', entity_class.__name__)
    return progress.get_count()
def _find_existing_items(entity_class, entities, start, end):
    """Batch-fetches entities[start:end] from the datastore.

    Returns the db.get() result list: one entry per key, None where the
    key does not exist in the datastore.
    """
    keys = [
        _get_entity_key(entity_class, entities[index])[0]
        for index in xrange(start, end)]
    return db.get(keys)
@_retry(message='Uploading batch of entities failed; retrying')
def _upload_batch(entity_class, schema, entities, start,
                  is_first_batch_after_resume, params):
    """Uploads one batch of entities starting at index 'start'.

    Args:
        entity_class: db.Model subclass being uploaded.
        schema: dict. JSON schema for the entity class.
        entities: list of dict. Serialized entities in upload order.
        start: int. Index of the first entity in this batch.
        is_first_batch_after_resume: bool. If True, entities already present
            (written by a partially-completed prior batch) are skipped
            instead of being treated as a fatal collision.
        params: argparse.Namespace. Parsed command-line arguments.

    Returns:
        Int. Number of entity slots covered by this batch (end - start).
    """
    end = min(start + params.batch_size, len(entities))
    # See what elements we want to upload already exist in the datastore.
    if params.force_overwrite:
        existing = []
    else:
        existing = _find_existing_items(entity_class, entities, start, end)
    # Build up array of things to batch-put to DB.
    to_put = []
    for i in xrange(start, end):
        key, id_or_name = _get_entity_key(entity_class, entities[i])
        if params.force_overwrite:
            if params.verbose:
                _LOG.info('Forcing write of object #%d with key %s',
                          i, id_or_name)
        elif existing[i - start]:
            if is_first_batch_after_resume:
                if params.verbose:
                    _LOG.info('Not overwriting object #%d with key %s '
                              'written in previous batch which we are '
                              'now recovering.', i, id_or_name)
                continue
            else:
                # Unexpected collision outside resume recovery is fatal.
                _die('Object #%d of class %s with key %s already exists.' % (
                    i, entity_class.__name__, id_or_name))
        else:
            if params.verbose:
                _LOG.info('Adding new object #%d with key %s', i, id_or_name)
        to_put.append(_build_entity(entity_class, schema, entities[i], key))
    if params.verbose:
        _LOG.info('Sending batch of %d objects to DB', end - start)
    db.put(to_put)
    return end - start
def _get_entity_key(entity_class, entity):
    """Rebuilds the db.Key for a serialized entity.

    Returns:
        Tuple of (db.Key, id_or_name). 'key.id' wins over 'key.name' when
        both are present; an id of 0 would fall through to the name --
        presumably datastore ids are never 0 (NOTE(review): confirm).
    """
    id_or_name = entity['key.id'] or entity['key.name']
    return db.Key.from_path(entity_class.__name__, id_or_name), id_or_name
def _build_entity(entity_class, schema, entity, key):
    """Reconstructs a datastore entity instance from its serialized dict."""
    typed_dict = transforms.json_to_dict(
        entity, schema, permit_none_values=True)
    instance = entity_class(key=key)
    entity_transforms.dict_to_entity(instance, typed_dict)
    return instance
def _validate_arguments(parsed_args):
    """Validate parsed args for additional constraints."""
    # Each check dies with a specific message; order determines which
    # violation is reported first.
    if (parsed_args.mode in {_MODE_DOWNLOAD, _MODE_UPLOAD}
            and not parsed_args.archive_path):
        _die('--archive_path missing')
    if parsed_args.batch_size < 1:
        _die('--batch_size must be a positive value')
    # Refuse to clobber an existing archive file unless explicitly forced.
    if (parsed_args.mode == _MODE_DOWNLOAD and
            os.path.exists(parsed_args.archive_path) and
            not parsed_args.force_overwrite):
        _die(
            'Cannot download to archive path %s; file already exists' % (
                parsed_args.archive_path))
    if (parsed_args.disable_remote and
            parsed_args.mode != _MODE_RUN
            and not parsed_args.internal):
        _die('--disable_remote supported only if mode is ' + _MODE_RUN)
    if (parsed_args.force_overwrite and
            parsed_args.mode not in _FORCE_OVERWRITE_MODES):
        _die(
            '--force_overwrite supported only if mode is one of %s' % (
                ', '.join(_FORCE_OVERWRITE_MODES)))
    # Privacy flags only make sense for datastore downloads.
    if parsed_args.privacy and not (
            parsed_args.mode == _MODE_DOWNLOAD and
            parsed_args.type == _TYPE_DATASTORE):
        _die(
            '--privacy supported only if mode is %s and type is %s' % (
                _MODE_DOWNLOAD, _TYPE_DATASTORE))
    if parsed_args.privacy_secret and not (
            parsed_args.mode == _MODE_DOWNLOAD and
            parsed_args.type == _TYPE_DATASTORE and parsed_args.privacy):
        _die(
            '--privacy_secret supported only if mode is %s, type is %s, and '
            '--privacy is passed' % (_MODE_DOWNLOAD, _TYPE_DATASTORE))
    if parsed_args.resume and parsed_args.mode != _MODE_UPLOAD:
        _die('--resume flag is only supported for uploading.')
def _write_model_to_json_file(json_file, privacy_transform_fn, model):
    """Serializes one (privacy-transformed) model and writes it as JSON."""
    json_file.write(
        transforms.dict_to_json(
            _get_entity_dict(model, privacy_transform_fn), None))
def main(parsed_args, environment_class=None):
    """Performs the requested ETL operation.

    Args:
        parsed_args: argparse.Namespace. Parsed command-line arguments.
        environment_class: None or remote.Environment. Environment setup class
            used to configure the service stub map. Injectable for tests only;
            defaults to remote.Environment if not specified.
    """
    _validate_arguments(parsed_args)
    _LOG.setLevel(parsed_args.log_level.upper())
    # Order matters: imports resolve names that the env-var and entity
    # module setup below depend on.
    _import_modules_into_global_scope()
    _set_env_vars_from_app_yaml()
    _import_entity_modules()
    if not environment_class:
        environment_class = remote.Environment
    _LOG.info('Mode is %s', parsed_args.mode)
    _LOG.info(
        'Target is url %s from application_id %s on server %s',
        parsed_args.course_url_prefix, parsed_args.application_id,
        parsed_args.server)
    if not parsed_args.disable_remote:
        # Establish the remote API connection, then re-pull config values
        # that would otherwise be stale.
        environment_class(
            parsed_args.application_id, parsed_args.server).establish()
        _force_config_reload()
    if parsed_args.mode == _MODE_DELETE:
        _delete(parsed_args)
    elif parsed_args.mode == _MODE_DOWNLOAD:
        _download(parsed_args)
    elif parsed_args.mode == _MODE_RUN:
        _run_custom(parsed_args)
    elif parsed_args.mode == _MODE_UPLOAD:
        _upload(parsed_args)
if __name__ == '__main__':
    # Parse command-line flags and dispatch to the requested ETL operation.
    main(create_configured_args_parser(sys.argv).parse_args())
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Examples of custom extract-transform-load jobs.
Custom jobs are run via tools/etl/etl.py. You must do environment setup before
etl.py can be invoked; see its module docstring for details.
See tools/etl/etl_lib.py for documentation on writing Job subclasses.
"""
__author__ = [
'johncox@google.com',
]
import os
import sys
import appengine_config
from models import models
from tools.etl import etl_lib
from google.appengine.api import memcache
from google.appengine.api import namespace_manager
class PrintMemcacheStats(etl_lib.Job):
    """Example job that prints remote memcache statistics.

    Usage:
    etl.py run tools.etl.examples.PrintMemcacheStats /course myapp \
    server.appspot.com

    Arguments to etl.py are documented in tools/etl/etl.py. You must do some
    environment configuration (setting up imports, mostly) before you can run
    etl.py; see the tools/etl/etl.py module-level docstring for details.
    """

    # String. Template to use when printing memcache stats; keys match the
    # dict returned by memcache.get_stats().
    _STATS_TEMPLATE = """Global memcache stats:
\tHits: %(hits)s
\tItems in cache: %(items)s
\tMisses: %(misses)s
\tOldest item in seconds: %(oldest_item_age)s
\tTotal bytes in cache: %(bytes)s
\tTotal bytes retrieved via get: %(byte_hits)s"""

    def main(self):
        # Custom jobs execute locally, but can talk to remote services like the
        # datastore and memcache. Here we get the same memcache stats you can
        # see in the Memcache Viewer part of App Engine's admin console.
        print self._STATS_TEMPLATE % memcache.get_stats()
class UploadFileToCourse(etl_lib.Job):
    """Example job that writes a single local file to a remote server.

    Usage:
    etl.py run tools.etl.examples.UploadFileToCourse /course myapp \
    server.appspot.com --job_args='/path/to/local/file path/to/remote/file'

    Arguments to etl.py are documented in tools/etl/etl.py. You must do some
    environment configuration (setting up imports, mostly) before you can run
    etl.py; see the tools/etl/etl.py module-level docstring for details.
    """

    def _configure_parser(self):
        # Add custom arguments by manipulating self.parser:
        self.parser.add_argument(
            'path', help='Absolute path of the file to upload', type=str)
        self.parser.add_argument(
            'target',
            help=('Internal Course Builder path to upload to (e.g. '
                  '"assets/img/logo.png")'), type=str)

    def main(self):
        # By the time main() is invoked, arguments are parsed and available as
        # self.args. If you need more complicated argument validation than
        # argparse gives you, do it here:
        if not os.path.exists(self.args.path):
            sys.exit('%s does not exist' % self.args.path)
        # Arguments passed to etl.py are also parsed and available as
        # self.etl_args. Here we use them to figure out the requested course's
        # context.
        context = etl_lib.get_context(self.etl_args.course_url_prefix)
        # Create the absolute path we'll write to.
        remote_path = os.path.join(
            appengine_config.BUNDLE_ROOT, self.args.target)
        with open(self.args.path) as f:
            # Perform the write using the context's filesystem. In a real
            # program you'd probably want to do additional work (preventing
            # overwrites of existing files, etc.).
            context.fs.impl.put(remote_path, f, is_draft=False)
class WriteStudentEmailsToFile(etl_lib.Job):
    """Example job that reads student emails from remote server to local file.

    Usage:
    etl.py run tools.etl.examples.WriteStudentEmailsToFile /course myapp \
    server.appspot.com --job_args=/path/to/output_file

    Arguments to etl.py are documented in tools/etl/etl.py. You must do some
    environment configuration (setting up imports, mostly) before you can run
    etl.py; see the tools/etl/etl.py module-level docstring for details.
    """

    def _configure_parser(self):
        # Add custom arguments by manipulating self.parser.
        self.parser.add_argument(
            'path', help='Absolute path to save output to', type=str)
        # NOTE(review): batch_size is accepted but unused below; the fetch
        # is a single call with a hard-coded limit of 1000.
        self.parser.add_argument(
            '--batch_size', default=20,
            help='Number of students to download in each batch', type=int)

    def main(self):
        # By the time main() is invoked, arguments are parsed and available as
        # self.args. If you need more complicated argument validation than
        # argparse gives you, do it here:
        if self.args.batch_size < 1:
            sys.exit('--batch size must be positive')
        if os.path.exists(self.args.path):
            sys.exit('Cannot download to %s; file exists' % self.args.path)
        # Arguments passed to etl.py are also parsed and available as
        # self.etl_args. Here we use them to figure out the requested course's
        # namespace.
        namespace = etl_lib.get_context(
            self.etl_args.course_url_prefix).get_namespace_name()
        # Because our models are namespaced, we need to change to the requested
        # course's namespace before doing datastore reads or we won't find its
        # data. Get the current namespace so we can change back when we're done.
        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(namespace)
            # For this example, we'll only process the first 1000 results. Can
            # do a keys_only query because the student's email is key.name().
            keys = models.Student.all(keys_only=True).fetch(1000)
        finally:
            # The current namespace is global state. We must change it back to
            # the old value no matter what to prevent corrupting datastore
            # operations that run after us.
            namespace_manager.set_namespace(old_namespace)
        # Write the results. Done!
        with open(self.args.path, 'w') as f:
            for key in keys:
                f.write(str(key.name() + '\n'))
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows export of Lessons and Units to other systems."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
from datetime import datetime
import os
import verify
# Version stamp written into the header comment of every exported file.
RELEASE_TAG = '1.0'


def echo(unused_x):
    """No-op callback handed to the verifier (swallows its output)."""
    pass
JS_GCB_REGEX = """
function gcb_regex(base, modifiers) {
// NB: base should already have backslashes escaped
return new RegExp(base, modifiers);
}
"""
def export_to_javascript(filename, lines, date):
    """Creates JavaScript export function from given lines and writes a file.

    Args:
        filename: string. Output path without extension; '.js' is appended.
        lines: list of string. Course definition statements to embed.
        date: datetime. Timestamp written into the file header.
    """
    code = []
    code.append(JS_GCB_REGEX)
    code.append('function gcb_import(){')
    for line in lines:
        if line:
            code.append(' %s' % line)
        else:
            code.append('')
    code.append('')
    code.append(' course = Array();')
    code.append(' course["units"] = units;')
    code.append(' course["assessments"] = assessments;')
    code.append(' return course;')
    code.append('}')
    # 'with' guarantees the file is closed even if a write fails; the
    # original manual open/close leaked the handle on error.
    with open('%s.js' % filename, 'w') as afile:
        afile.write('// Course Builder %s JavaScript Export on %s\n' % (
            RELEASE_TAG, date))
        afile.write('// begin\n')
        afile.write('\n'.join(code))
        afile.write('\n// end')
PYTHON_GCB_REGEX = """
import re
def gcb_regex(base, modifiers):
flags = 0
if 'i' in modifiers:
flags |= re.IGNORECASE
if 'm' in modifiers:
flags |= re.MULTILINE
return re.compile(base, flags)
"""
def export_to_python(filename, lines, date):
    """Creates Python export function from given lines and writes a file.

    Args:
        filename: string. Output path without extension; '.py' is appended.
        lines: list of string. Course definition statements to embed.
        date: datetime. Timestamp written into the file header.
    """
    code = []
    # Shims so the JavaScript-flavored generated statements run as Python.
    code.append('class Array(dict):')
    code.append(' pass')
    code.append('')
    code.append('true = True')
    code.append('false = False')
    code.append(PYTHON_GCB_REGEX)
    code.append('def gcb_import():')
    for line in lines:
        code.append(' %s' % line)
    code.append('')
    code.append(' course = Array();')
    code.append(' course["units"] = units;')
    code.append(' course["assessments"] = assessments;')
    code.append(' return course;')
    # 'with' guarantees the file is closed even if a write fails; the
    # original manual open/close leaked the handle on error.
    with open('%s.py' % filename, 'w') as afile:
        afile.write('# Course Builder %s Python Export on %s\n' % (
            RELEASE_TAG, date))
        afile.write('# begin\n')
        afile.write('\n'.join(code))
        afile.write('\n# end')
# TODO(psimakov): implement PHP_GCB_REGEX, but it's unclear how to return a new
# regexp object in PHP. maybe see http://www.regular-expressions.info/php.html
def export_to_php(filename, lines, date):
    """Creates PHP export function from given lines and writes a file.

    Args:
        filename: base output path; '.php' is appended.
        lines: iterable of course-definition statements to embed.
        date: timestamp recorded in the file header comment.
    """
    code = []
    code.append('function gcb_import(){')
    for line in lines:
        # Keep blank input lines truly empty; prefix real lines with '$'
        # to turn the exported assignments into PHP variables.
        if line:
            code.append('  $%s' % line)
        else:
            code.append('')
    code.append('')
    code.append('  $course = Array();')
    code.append('  $course["units"] = $units;')
    code.append('  $course["assessments"] = $assessments;')
    code.append('  return $course;')
    code.append('}')
    # 'with' guarantees the file is closed even if a write raises.
    with open('%s.php' % filename, 'w') as afile:
        afile.write('<?php\n')
        afile.write('// Course Builder %s PHP Export on %s\n' %
                    (RELEASE_TAG, date))
        afile.write('// begin\n')
        afile.write('\n'.join(code))
        afile.write('\n// end')
        # Put the close tag on its own line. Previously '?>' was appended
        # directly after '// end', relying on PHP's rule that '?>'
        # terminates a one-line comment.
        afile.write('\n?>')
def export_to_file(filename, lines):
    """Exports course lines to JavaScript, Python and PHP files.

    Args:
        filename: base output path; each exporter appends its extension.
        lines: iterable of course-definition statements to embed.
    """
    # Local import keeps this function self-contained: the module-level
    # imports visible here (os, verify) do not bring datetime into scope.
    from datetime import datetime
    date = datetime.utcnow()
    export_to_javascript(filename, lines, date)
    export_to_python(filename, lines, date)
    export_to_php(filename, lines, date)
if __name__ == '__main__':
    # Python 2 script entry point: verify the course model, then export it
    # in all three target languages.
    print 'Export started using %s' % os.path.realpath(__file__)
    verifier = verify.Verifier()
    # echo is a no-op progress callback; errors come back as a list.
    errors = verifier.load_and_verify_model(echo)
    if errors:
        raise Exception('Please fix all errors reported by tools/verify.py '
                        'before continuing!')
    fname = os.path.join(os.getcwd(), 'coursebuilder_course')
    export_to_file(fname, verifier.export)
    print 'Export complete to %s' % fname
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import logging
from utils import BaseHandler
from utils import HUMAN_READABLE_DATETIME_FORMAT
from controllers import lessons
from models import courses
from models import models
from models import review
from models import student_work
from models import transforms
from models import utils
from models.models import Student
from models.models import StudentAnswersEntity
from tools import verify
from google.appengine.ext import db
def store_score(course, student, assessment_type, score):
    """Stores a student's score on a particular assessment.

    Only the best score is kept: when the student already has a score for
    this assessment and the new one is not strictly greater, nothing is
    written.

    Args:
        course: the course containing the assessment.
        student: the student whose data is stored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always saved,
    # but scores are only saved if they're higher than the previous attempt.
    # This can lead to unexpected analytics behavior. Resolve this.
    previous_score = course.get_score(student, assessment_type)
    # Stored scores come back as strings/objects; cast before comparing.
    if previous_score is None or score > int(previous_score):
        utils.set_score(student, assessment_type, score)
class AnswerHandler(BaseHandler):
    """Handler for saving assessment answers."""
    # Find student entity and save answers
    @db.transactional(xg=True)
    def update_assessment_transaction(
        self, email, assessment_type, new_answers, score):
        """Stores answer and updates user scores.

        Runs as a cross-group (xg) datastore transaction so the Student
        and StudentAnswersEntity writes commit or roll back together.

        Args:
            email: the student's email address.
            assessment_type: the title of the assessment.
            new_answers: the latest set of answers supplied by the student.
            score: the numerical assessment score.

        Returns:
            the student instance.
        """
        student = Student.get_enrolled_student_by_email(email)
        course = self.get_course()
        # It may be that old Student entities don't have user_id set; fix it.
        if not student.user_id:
            student.user_id = self.get_user().user_id()
        answers = StudentAnswersEntity.get_by_key_name(student.user_id)
        if not answers:
            answers = StudentAnswersEntity(key_name=student.user_id)
        answers.updated_on = datetime.datetime.now()
        # Latest answers always overwrite; scores only improve (store_score).
        utils.set_answer(answers, assessment_type, new_answers)
        store_score(course, student, assessment_type, score)
        student.put()
        answers.put()
        # Also record the event, which is useful for tracking multiple
        # submissions and history.
        models.EventEntity.record(
            'submit-assessment', self.get_user(), transforms.dumps({
                'type': 'assessment-%s' % assessment_type,
                'values': new_answers, 'location': 'AnswerHandler'}))
        return student
    def get(self):
        """Handles GET requests.

        This method is here because if a student logs out when on the
        reviewed_assessment_confirmation page, that student is redirected to
        the GET method of the corresponding handler. It might be a good idea to
        merge this class with lessons.AssessmentHandler, which currently only
        has a GET handler.
        """
        self.redirect('/course')
    # pylint: disable=too-many-statements
    def post(self):
        """Handles POST requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
            return
        course = self.get_course()
        # 'assessment_type' carries the assessment's unit id.
        assessment_type = self.request.get('assessment_type')
        if not assessment_type:
            self.error(404)
            logging.error('No assessment type supplied.')
            return
        unit = course.find_unit_by_id(assessment_type)
        if unit is None or unit.type != verify.UNIT_TYPE_ASSESSMENT:
            self.error(404)
            logging.error('No assessment named %s exists.', assessment_type)
            return
        self.template_value['navbar'] = {'course': True}
        self.template_value['assessment'] = assessment_type
        self.template_value['assessment_name'] = unit.title
        self.template_value['is_last_assessment'] = (
            course.is_last_assessment(unit))
        self.template_value['unit_id'] = unit.unit_id
        # Convert answers from JSON to dict.
        answers = self.request.get('answers')
        answers = transforms.loads(answers) if answers else []
        grader = unit.workflow.get_grader()
        # Scores are not recorded for human-reviewed assignments.
        score = 0
        if grader == courses.AUTO_GRADER:
            score = int(round(float(self.request.get('score'))))
        # Record assessment transaction.
        student = self.update_assessment_transaction(
            student.key().name(), assessment_type, answers, score)
        if grader == courses.HUMAN_GRADER:
            rp = course.get_reviews_processor()
            # Guard against duplicate submissions of a human-graded assessment.
            previously_submitted = rp.does_submission_exist(
                unit.unit_id, student.get_key())
            if not previously_submitted:
                # Check that the submission due date has not passed.
                time_now = datetime.datetime.now()
                submission_due_date = unit.workflow.get_submission_due_date()
                if time_now > submission_due_date:
                    self.template_value['time_now'] = time_now.strftime(
                        HUMAN_READABLE_DATETIME_FORMAT)
                    self.template_value['submission_due_date'] = (
                        submission_due_date.strftime(
                            HUMAN_READABLE_DATETIME_FORMAT))
                    self.template_value['error_code'] = (
                        'assignment_deadline_exceeded')
                    self.render('error.html')
                    return
                submission_key = student_work.Submission.write(
                    unit.unit_id, student.get_key(), answers)
                rp.start_review_process_for(
                    unit.unit_id, submission_key, student.get_key())
                # Record completion event in progress tracker.
                course.get_progress_tracker().put_assessment_completed(
                    student, assessment_type)
            self.template_value['previously_submitted'] = previously_submitted
            matcher = unit.workflow.get_matcher()
            self.template_value['matcher'] = matcher
            if matcher == review.PEER_MATCHER:
                self.template_value['review_dashboard_url'] = (
                    'reviewdashboard?unit=%s' % unit.unit_id
                )
            self.render('reviewed_assessment_confirmation.html')
            return
        else:
            # Record completion event in progress tracker.
            course.get_progress_tracker().put_assessment_completed(
                student, assessment_type)
            # Save the submission in the datastore, overwriting the earlier
            # version if it exists.
            submission_key = student_work.Submission.write(
                unit.unit_id, student.get_key(), answers)
            course.update_final_grades(student)
            parent_unit = course.get_parent_unit(unit.unit_id)
            if parent_unit:
                # Pre/post assessment inside a unit: bounce back to the
                # unit page's confirmation view.
                unit_contents = lessons.UnitHandler.UnitLeftNavElements(
                    course, parent_unit)
                next_url = unit_contents.get_url_by(
                    'assessment', unit.unit_id, 0) + '&confirmation'
                self.redirect('/' + next_url)
            else:
                self.template_value['result'] = course.get_overall_result(
                    student)
                self.template_value['score'] = score
                self.template_value['overall_score'] = course.get_overall_score(
                    student)
                self.render('test_confirmation.html')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for generating various frontend pages."""
__author__ = 'Saifu Angto (saifu@google.com)'
import copy
import datetime
import urllib
import urlparse
from utils import BaseHandler
from utils import BaseRESTHandler
from utils import CAN_PERSIST_ACTIVITY_EVENTS
from utils import CAN_PERSIST_PAGE_EVENTS
from utils import CAN_PERSIST_TAG_EVENTS
from utils import HUMAN_READABLE_DATETIME_FORMAT
from utils import TRANSIENT_STUDENT
from utils import XsrfTokenManager
from common import jinja_utils
from common import safe_dom
from models import courses
from models import models
from models import student_work
from models import transforms
from models.counters import PerfCounter
from models.models import Student
from models.models import StudentProfileDAO
from models.review import ReviewUtils
from models.student_work import StudentWorkUtils
from modules import courses as courses_module
from modules.review import domain
from tools import verify
from google.appengine.ext import db
# Performance counters tracking the volume of incoming student events.
COURSE_EVENTS_RECEIVED = PerfCounter(
    'gcb-course-events-received',
    'A number of activity/assessment events received by the server.')
COURSE_EVENTS_RECORDED = PerfCounter(
    'gcb-course-events-recorded',
    'A number of activity/assessment events recorded in a datastore.')
# Values assigned to template_value['page_type'] by the handlers below.
UNIT_PAGE_TYPE = 'unit'
ACTIVITY_PAGE_TYPE = 'activity'
ASSESSMENT_PAGE_TYPE = 'assessment'
ASSESSMENT_CONFIRMATION_PAGE_TYPE = 'test_confirmation'
# Event source names grouped by the kind of progress they complete
# (presumably consumed by the events handler -- confirm against
# EventsRESTHandler below this view).
TAGS_THAT_TRIGGER_BLOCK_COMPLETION = ['attempt-activity']
TAGS_THAT_TRIGGER_COMPONENT_COMPLETION = ['tag-assessment']
TAGS_THAT_TRIGGER_HTML_COMPLETION = ['attempt-lesson']
def get_first_lesson(handler, unit_id):
    """Returns the first lesson in the unit, or None if it has no lessons."""
    unit_lessons = handler.get_course().get_lessons(unit_id)
    if unit_lessons:
        return unit_lessons[0]
    return None
def _get_selected_unit_or_first_unit(handler):
# Finds unit requested or a first unit in the course.
u = handler.request.get('unit')
unit = handler.get_course().find_unit_by_id(u)
if not unit:
units = handler.get_course().get_units()
for current_unit in units:
if verify.UNIT_TYPE_UNIT == current_unit.type:
unit = current_unit
break
return unit
def _get_selected_or_first_lesson(handler, unit):
# Find lesson requested or a first lesson in the unit.
l = handler.request.get('lesson')
lesson = None
if not l:
lesson = get_first_lesson(handler, unit.unit_id)
else:
lesson = handler.get_course().find_lesson_by_id(unit, l)
return lesson
def extract_unit_and_lesson(handler):
    """Loads unit and lesson specified in the request.

    Returns:
        A (unit, lesson) pair; (None, None) when no unit can be resolved.
    """
    unit = _get_selected_unit_or_first_unit(handler)
    if not unit:
        return None, None
    lesson = _get_selected_or_first_lesson(handler, unit)
    return unit, lesson
def extract_unit_and_lesson_or_assessment(handler):
    """Resolves the unit, lesson and assessment named by the request.

    Returns:
        A (unit, lesson, assessment) triple. When the request names neither
        a lesson nor an assessment, falls back in display order: the unit's
        pre-assessment, then its first lesson, then its post-assessment.
    """
    unit = _get_selected_unit_or_first_unit(handler)
    if not unit:
        return None, None, None
    course = handler.get_course()
    lesson_id = handler.request.get('lesson')
    lesson = course.find_lesson_by_id(unit, lesson_id) if lesson_id else None
    assessment_id = handler.request.get('assessment')
    assessment = (
        course.find_unit_by_id(assessment_id) if assessment_id else None)
    if lesson or assessment:
        return unit, lesson, assessment
    # Nothing explicitly selected: pick the unit's first displayable element.
    if unit.pre_assessment:
        return unit, None, course.find_unit_by_id(unit.pre_assessment)
    first_lesson = get_first_lesson(handler, unit.unit_id)
    if first_lesson:
        return unit, first_lesson, None
    if unit.post_assessment:
        return unit, None, course.find_unit_by_id(unit.post_assessment)
    return unit, None, None
def get_unit_and_lesson_id_from_url(handler, url):
    """Extracts unit and lesson ids from a URL.

    Args:
        handler: the request handler; only used to look up the unit's first
            lesson when the URL names no lesson.
        url: the URL to parse.

    Returns:
        A (unit_id, lesson_id) pair of strings. (None, None) when the URL
        carries no 'unit' query parameter; lesson_id is None when the unit
        has no lessons at all.
    """
    url_components = urlparse.urlparse(url)
    query_dict = urlparse.parse_qs(url_components.query)
    if 'unit' not in query_dict:
        return None, None
    unit_id = query_dict['unit'][0]
    if 'lesson' in query_dict:
        return unit_id, query_dict['lesson'][0]
    # Bug fix: get_first_lesson returns None for a unit without lessons;
    # the original unconditionally dereferenced .lesson_id and raised
    # AttributeError in that case.
    first_lesson = get_first_lesson(handler, unit_id)
    return unit_id, first_lesson.lesson_id if first_lesson else None
def create_readonly_assessment_params(content, answers):
    """Creates parameters for a readonly assessment in the view templates."""
    assessment = content['assessment']
    return {
        'preamble': assessment['preamble'],
        'questionsList': assessment['questionsList'],
        'answers': answers,
    }
def filter_assessments_used_within_units(units):
    """Drops assessments that are shown inside a unit as pre/post tests."""
    # Collect the ids of assessments any unit claims as its own.
    referenced = set()
    for unit in units:
        if unit.type != verify.UNIT_TYPE_UNIT:
            continue
        for assessment_id in (unit.pre_assessment, unit.post_assessment):
            if assessment_id:
                referenced.add(assessment_id)
    return [unit for unit in units if unit.unit_id not in referenced]
def augment_assessment_units(course, student):
    """Adds additional fields to assessment units, in place.

    For every assessment unit, records whether the student has already
    submitted it; for human-reviewed assessments, also attaches the
    review matcher and the student's review progress.

    Args:
        course: the course whose assessment units are annotated.
        student: the student whose submission/review state is consulted.
    """
    rp = course.get_reviews_processor()
    for unit in course.get_units():
        # Consistency fix: use the named constant instead of the literal
        # 'A', matching verify.UNIT_TYPE_* usage elsewhere in this module.
        if unit.type == verify.UNIT_TYPE_ASSESSMENT:
            if unit.needs_human_grader():
                review_steps = rp.get_review_steps_by(
                    unit.unit_id, student.get_key())
                review_min_count = unit.workflow.get_review_min_count()
                unit.matcher = unit.workflow.get_matcher()
                unit.review_progress = ReviewUtils.get_review_progress(
                    review_steps, review_min_count,
                    course.get_progress_tracker()
                )
            unit.is_submitted = rp.does_submission_exist(
                unit.unit_id, student.get_key())
def is_progress_recorded(handler, student):
    """Whether any progress would be persisted for this student."""
    if student.is_transient:
        return False
    # NOTE(review): if CAN_PERSIST_ACTIVITY_EVENTS is a config-property
    # object rather than its boolean value, this test is always truthy --
    # confirm against utils.
    if CAN_PERSIST_ACTIVITY_EVENTS:
        return True
    # Even without global event persistence, manual-progress units and
    # lessons still record completion.
    course = handler.get_course()
    for unit in handler.get_track_matching_student(student):
        if unit.manual_progress:
            return True
        if any(lesson.manual_progress
               for lesson in course.get_lessons(unit.unit_id)):
            return True
    return False
def add_course_outline_to_template(handler, student):
    """Adds course outline with all units, lessons, progress to the template.

    Populates 'course_outline' with one tuple per visible unit:
    (unit, lessons, lesson_progress, pre_assessment, post_assessment),
    plus 'unit_progress' and, for enrolled students, 'course_progress'.
    """
    tracker = handler.get_progress_tracker()
    if student and not student.is_transient:
        augment_assessment_units(handler.get_course(), student)
        handler.template_value['course_progress'] = (
            tracker.get_course_progress(student))
    units = filter_assessments_used_within_units(
        handler.get_track_matching_student(student))
    progress = None
    if is_progress_recorded(handler, student):
        progress = tracker.get_or_create_progress(student)
    outline = []
    for unit in units:
        unit_lessons = handler.get_lessons(unit.unit_id)
        lesson_progress = None
        if progress:
            lesson_progress = tracker.get_lesson_progress(
                student, unit.unit_id, progress=progress)
        pre_assessment = (handler.find_unit_by_id(unit.pre_assessment)
                          if unit.pre_assessment else None)
        post_assessment = (handler.find_unit_by_id(unit.post_assessment)
                           if unit.post_assessment else None)
        outline.append((unit, unit_lessons, lesson_progress,
                        pre_assessment, post_assessment))
    handler.template_value['course_outline'] = outline
    handler.template_value['unit_progress'] = tracker.get_unit_progress(
        student, progress=progress)
class CourseHandler(BaseHandler):
    """Handler for generating course page."""
    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        return [('/rest/events', EventsRESTHandler)]
    def get(self):
        """Handles GET requests."""
        # Batch memcache reads for the duration of page assembly.
        models.MemcacheManager.begin_readonly()
        try:
            user = self.personalize_page_and_get_user()
            if user is None:
                student = TRANSIENT_STUDENT
            else:
                student = Student.get_enrolled_student_by_email(user.email())
                profile = StudentProfileDAO.get_profile_by_user_id(
                    user.user_id())
                self.template_value['has_global_profile'] = profile is not None
                if not student:
                    student = TRANSIENT_STUDENT
            # Non-browsable courses are never shown to transient visitors.
            if (student.is_transient and
                not self.app_context.get_environ()['course']['browsable']):
                self.redirect('/preview')
                return
            # If we are on this page due to visiting the course base URL
            # (and not base url plus "/course"), redirect registered students
            # to the last page they were looking at.
            last_location = self.get_redirect_location(student)
            if last_location:
                self.redirect(last_location)
                return
            tracker = self.get_progress_tracker()
            units = self.get_track_matching_student(student)
            units = filter_assessments_used_within_units(units)
            self.template_value['units'] = units
            self.template_value['show_registration_page'] = True
            if student and not student.is_transient:
                augment_assessment_units(self.get_course(), student)
                self.template_value['course_progress'] = (
                    tracker.get_course_progress(student))
            elif user:
                # Logged-in but not enrolled: skip the registration page
                # when a global profile exists and no extra fields are asked.
                profile = StudentProfileDAO.get_profile_by_user_id(
                    user.user_id())
                additional_registration_fields = self.app_context.get_environ(
                    )['reg_form']['additional_registration_fields']
                if profile is not None and not additional_registration_fields:
                    self.template_value['show_registration_page'] = False
                    self.template_value['register_xsrf_token'] = (
                        XsrfTokenManager.create_xsrf_token('register-post'))
            self.template_value['transient_student'] = student.is_transient
            self.template_value['progress'] = tracker.get_unit_progress(student)
            course = self.app_context.get_environ()['course']
            self.template_value['video_exists'] = bool(
                'main_video' in course and
                'url' in course['main_video'] and
                course['main_video']['url'])
            self.template_value['image_exists'] = bool(
                'main_image' in course and
                'url' in course['main_image'] and
                course['main_image']['url'])
            self.template_value['is_progress_recorded'] = is_progress_recorded(
                self, student)
            self.template_value['navbar'] = {'course': True}
        finally:
            models.MemcacheManager.end_readonly()
        self.render('course.html')
class UnitHandler(BaseHandler):
    """Handler for generating unit page."""
    # A list of callback functions which modules can use to add extra content
    # panels at the bottom of the page. Each function receives the app_context
    # as its single arg, and should return a string or None.
    EXTRA_CONTENT = []
    # The lesson title provider should be a function which receives the
    # app_context, the unit, and the lesson, and returns a jinja2.Markup or a
    # safe_dom object. If it returns None, the default title is used instead.
    _LESSON_TITLE_PROVIDER = None
    class UnitLeftNavElements(object):
        """Flat, ordered list of a unit's nav targets with prev/next lookup.

        Order is: pre-assessment, then each lesson (followed by its listed
        activity, if any), then post-assessment.
        """
        def __init__(self, course, unit):
            self._urls = []
            self._index_by_label = {}
            if unit.pre_assessment:
                self._index_by_label['assessment.%d' % unit.pre_assessment] = (
                    len(self._urls))
                self._urls.append('unit?unit=%s&assessment=%d' % (
                    unit.unit_id, unit.pre_assessment))
            for lesson in course.get_lessons(unit.unit_id):
                self._index_by_label['lesson.%s' % lesson.lesson_id] = (
                    len(self._urls))
                self._urls.append('unit?unit=%s&lesson=%s' % (
                    unit.unit_id, lesson.lesson_id))
                if lesson.activity and lesson.activity_listed:
                    self._index_by_label['activity.%s' % lesson.lesson_id] = (
                        len(self._urls))
                    self._urls.append('unit?unit=%s&lesson=%s&activity=true' % (
                        unit.unit_id, lesson.lesson_id))
            if unit.post_assessment:
                self._index_by_label['assessment.%d' % unit.post_assessment] = (
                    len(self._urls))
                self._urls.append('unit?unit=%s&assessment=%d' % (
                    unit.unit_id, unit.post_assessment))
        def get_url_by(self, item_type, item_id, offset):
            """Returns the URL `offset` steps from (item_type, item_id).

            Returns None when the offset runs off either end of the list.
            """
            index = self._index_by_label['%s.%s' % (item_type, item_id)]
            index += offset
            if index >= 0 and index < len(self._urls):
                return self._urls[index]
            else:
                return None
    @classmethod
    def set_lesson_title_provider(cls, lesson_title_provider):
        """Installs a module-supplied lesson title renderer (at most one)."""
        if cls._LESSON_TITLE_PROVIDER:
            raise Exception('Lesson title provider already set by a module')
        cls._LESSON_TITLE_PROVIDER = lesson_title_provider
    def _default_lesson_title_provider(
        self, app_context, unit, lesson, unused_student):
        """Renders the plain lesson title, flagging drafts for admins."""
        title_h1 = safe_dom.Element(
            'h1', className='gcb-lesson-title').add_text(lesson.title)
        can_see_drafts = courses_module.courses.can_see_drafts(self.app_context)
        if not lesson.now_available and can_see_drafts:
            title_h1.add_text(' ').add_child(
                safe_dom.Element('span', id='lesson-title-private').add_text(
                    '(Private)'))
        return safe_dom.Element('div', className='lesson-title').add_child(
            title_h1)
    def get(self):
        """Handles GET requests."""
        models.MemcacheManager.begin_readonly()
        try:
            student = self.personalize_page_and_get_enrolled(
                supports_transient_student=True)
            if not student:
                return
            # Extract incoming args
            unit, lesson, assessment = extract_unit_and_lesson_or_assessment(
                self)
            unit_id = unit.unit_id
            # If the unit is not currently available, and the user does not have
            # the permission to see drafts, redirect to the main page.
            available_units = self.get_track_matching_student(student)
            if ((not unit.now_available or unit not in available_units) and
                not courses_module.courses.can_see_drafts(self.app_context)):
                self.redirect('/')
                return
            # Set template values for nav bar and page type.
            self.template_value['navbar'] = {'course': True}
            # Set template values for a unit and its lesson entities
            self.template_value['unit'] = unit
            self.template_value['unit_id'] = unit.unit_id
            # These attributes are needed in order to render questions (with
            # progress indicators) in the lesson body. They are used by the
            # custom component renderers in the assessment_tags module.
            self.student = student
            self.unit_id = unit_id
            add_course_outline_to_template(self, student)
            self.template_value['is_progress_recorded'] = is_progress_recorded(
                self, student)
            if (unit.show_contents_on_one_page and
                'confirmation' not in self.request.params):
                self._show_all_contents(student, unit)
            else:
                self._show_single_element(student, unit, lesson, assessment)
            # Give registered modules a chance to append panels to the page.
            for extra_content_hook in self.EXTRA_CONTENT:
                extra_content = extra_content_hook(self.app_context)
                if extra_content is not None:
                    self.template_value['display_content'].append(extra_content)
            self._set_gcb_html_element_class()
        finally:
            models.MemcacheManager.end_readonly()
        self.render('unit.html')
    def _set_gcb_html_element_class(self):
        """Select conditional CSS to hide parts of the unit page."""
        # TODO(jorr): Add an integration test for this once, LTI producer and
        # consumer code is completely checked in.
        gcb_html_element_class = []
        if self.request.get('hide-controls') == 'true':
            gcb_html_element_class.append('hide-controls')
        if self.request.get('hide-lesson-title') == 'true':
            gcb_html_element_class.append('hide-lesson-title')
        self.template_value['gcb_html_element_class'] = (
            ' '.join(gcb_html_element_class))
    def _apply_gcb_tags(self, text):
        """Expands custom GCB tags embedded in authored HTML text."""
        return jinja_utils.get_gcb_tags_filter(self)(text)
    def _show_all_contents(self, student, unit):
        """Renders header, all lessons/assessments, and footer on one page."""
        course = self.get_course()
        display_content = []
        left_nav_elements = UnitHandler.UnitLeftNavElements(
            self.get_course(), unit)
        if unit.unit_header:
            display_content.append(self._apply_gcb_tags(unit.unit_header))
        if unit.pre_assessment:
            display_content.append(self.get_assessment_display_content(
                student, unit, course.find_unit_by_id(unit.pre_assessment),
                left_nav_elements, {}))
        for lesson in course.get_lessons(unit.unit_id):
            # lesson_id/lesson_is_scored are read by component renderers;
            # set them only for the duration of this lesson's rendering.
            self.lesson_id = lesson.lesson_id
            self.lesson_is_scored = lesson.scored
            template_values = copy.copy(self.template_value)
            self.set_lesson_content(student, unit, lesson, left_nav_elements,
                                    template_values)
            display_content.append(self.render_template_to_html(
                template_values, 'lesson_common.html'))
            del self.lesson_id
            del self.lesson_is_scored
        if unit.post_assessment:
            display_content.append(self.get_assessment_display_content(
                student, unit, course.find_unit_by_id(unit.post_assessment),
                left_nav_elements, {}))
        if unit.unit_footer:
            display_content.append(self._apply_gcb_tags(unit.unit_footer))
        self.template_value['display_content'] = display_content
    def _showing_first_element(self, unit, lesson, assessment, is_activity):
        """Whether the unit page is showing the first element of a Unit."""
        # If the unit has a pre-assessment, then that's the first element;
        # we are showing the first element iff we are showing that assessment.
        if unit.pre_assessment:
            return (assessment and
                    str(assessment.unit_id) == str(unit.pre_assessment))
        # If there is no pre-assessment, there may be lessons. If there
        # are any lessons, then the first element is the first unit component.
        # Iff we are showing that lesson, we're on the first component.
        unit_lessons = self.get_course().get_lessons(unit.unit_id)
        if unit_lessons:
            if lesson and lesson.lesson_id == unit_lessons[0].lesson_id:
                # If the first lesson has an activity, then we are showing
                # the first element if we are showing the lesson, and not
                # the activity.
                return not is_activity
            return False
        # If there is no pre-assessment and no lessons, then the post-assessment
        # is the first element. We are on the first element if we're showing
        # that assessment.
        if unit.post_assessment:
            return (assessment and
                    str(assessment.unit_id) == str(unit.post_assessment))
        # If unit has no pre-assessment, no lessons, and no post-assessment,
        # then we're both at the first and last item.
        if (not unit.pre_assessment and
            not unit.post_assessment and
            not unit_lessons):
            return True
        return False
    def _showing_last_element(self, unit, lesson, assessment, is_activity):
        """Whether the unit page is showing the last element of a Unit."""
        # If the unit has a post-assessment, then that's the last element;
        # we are showing the last element iff we are showing that assessment.
        if unit.post_assessment:
            return (assessment and
                    str(assessment.unit_id) == str(unit.post_assessment))
        # If there is no post-assessment, there may be lessons. If there
        # are any lessons, then the last element is the last unit component.
        # Iff we are showing that lesson, we're on the last component.
        unit_lessons = self.get_course().get_lessons(unit.unit_id)
        if unit_lessons:
            if lesson and lesson.lesson_id == unit_lessons[-1].lesson_id:
                # If the lesson has an activity, and we're showing the
                # activity, that's last.
                return is_activity == lesson.has_activity
            return False
        # If there is no post-assessment and there are no lessons, then
        # the pre-assessment is the last item in the unit. We are on the
        # last element if we're showing that assessment.
        if unit.pre_assessment:
            return (assessment and
                    str(assessment.unit_id) == str(unit.pre_assessment))
        # If unit has no pre-assessment, no lessons, and no post-assessment,
        # then we're both at the first and last item.
        if (not unit.pre_assessment and
            not unit.post_assessment and
            not unit_lessons):
            return True
        return False
    def _show_single_element(self, student, unit, lesson, assessment):
        """Renders exactly one unit element (lesson/activity/assessment)."""
        # Add markup to page which depends on the kind of content.
        left_nav_elements = UnitHandler.UnitLeftNavElements(
            self.get_course(), unit)
        # need 'activity' to be True or False, and not the string 'true' or None
        is_activity = (self.request.get('activity') != '' or
                       '/activity' in self.request.path)
        display_content = []
        if (unit.unit_header and
            self._showing_first_element(unit, lesson, assessment, is_activity)):
            display_content.append(self._apply_gcb_tags(unit.unit_header))
        if assessment:
            if 'confirmation' in self.request.params:
                self.set_confirmation_content(student, unit, assessment,
                                              left_nav_elements)
                self.template_value['assessment_name'] = (
                    self.template_value.get('assessment_name').lower())
                display_content.append(self.render_template_to_html(
                    self.template_value, 'test_confirmation_content.html'))
            else:
                display_content.append(self.get_assessment_display_content(
                    student, unit, assessment, left_nav_elements,
                    self.template_value))
        elif lesson:
            self.lesson_id = lesson.lesson_id
            self.lesson_is_scored = lesson.scored
            if is_activity:
                self.set_activity_content(student, unit, lesson,
                                          left_nav_elements)
            else:
                self.set_lesson_content(student, unit, lesson,
                                        left_nav_elements, self.template_value)
            display_content.append(self.render_template_to_html(
                self.template_value, 'lesson_common.html'))
        if (unit.unit_footer and
            self._showing_last_element(unit, lesson, assessment, is_activity)):
            display_content.append(self._apply_gcb_tags(unit.unit_footer))
        self.template_value['display_content'] = display_content
    def get_assessment_display_content(self, student, unit, assessment,
                                       left_nav_elements, template_values):
        """Returns rendered HTML for an assessment shown inside a unit page."""
        template_values['page_type'] = ASSESSMENT_PAGE_TYPE
        template_values['assessment'] = assessment
        template_values['back_button_url'] = left_nav_elements.get_url_by(
            'assessment', assessment.unit_id, -1)
        template_values['next_button_url'] = left_nav_elements.get_url_by(
            'assessment', assessment.unit_id, 1)
        # Delegate actual rendering to the standalone assessment handler,
        # borrowing this request's context.
        assessment_handler = AssessmentHandler()
        assessment_handler.app_context = self.app_context
        assessment_handler.request = self.request
        return assessment_handler.get_assessment_content(
            student, self.get_course(), assessment, as_lesson=True)
    def set_confirmation_content(self, student, unit, assessment,
                                 left_nav_elements):
        """Fills template values for the post-submission confirmation view."""
        course = self.get_course()
        self.template_value['page_type'] = ASSESSMENT_CONFIRMATION_PAGE_TYPE
        self.template_value['unit'] = unit
        self.template_value['assessment'] = assessment
        self.template_value['is_confirmation'] = True
        self.template_value['assessment_name'] = assessment.title
        self.template_value['score'] = (
            course.get_score(student, str(assessment.unit_id)))
        self.template_value['is_last_assessment'] = (
            course.is_last_assessment(assessment))
        self.template_value['overall_score'] = (
            course.get_overall_score(student))
        self.template_value['result'] = course.get_overall_result(student)
        self.template_value['back_button_url'] = left_nav_elements.get_url_by(
            'assessment', assessment.unit_id, 0)
        self.template_value['next_button_url'] = left_nav_elements.get_url_by(
            'assessment', assessment.unit_id, 1)
    def set_activity_content(self, student, unit, lesson, left_nav_elements):
        """Fills template values for a lesson's activity view."""
        self.template_value['page_type'] = ACTIVITY_PAGE_TYPE
        self.template_value['lesson'] = lesson
        self.template_value['lesson_id'] = lesson.lesson_id
        self.template_value['back_button_url'] = left_nav_elements.get_url_by(
            'activity', lesson.lesson_id, -1)
        self.template_value['next_button_url'] = left_nav_elements.get_url_by(
            'activity', lesson.lesson_id, 1)
        self.template_value['activity'] = {
            'title': lesson.activity_title,
            'activity_script_src': (
                self.get_course().get_activity_filename(unit.unit_id,
                                                        lesson.lesson_id))}
        self.template_value['page_type'] = 'activity'
        self.template_value['title'] = lesson.activity_title
        if is_progress_recorded(self, student):
            # Mark this page as accessed. This is done after setting the
            # student progress template value, so that the mark only shows up
            # after the student visits the page for the first time.
            self.get_course().get_progress_tracker().put_activity_accessed(
                student, unit.unit_id, lesson.lesson_id)
    def _get_lesson_title(self, unit, lesson, student):
        """Returns the module-provided lesson title, falling back to default."""
        title = None
        if self._LESSON_TITLE_PROVIDER:
            title = self._LESSON_TITLE_PROVIDER(
                self.app_context, unit, lesson, student)
        if title is None:
            title = self._default_lesson_title_provider(
                self.app_context, unit, lesson, student)
        return title
    def set_lesson_content(self, student, unit, lesson, left_nav_elements,
                           template_values):
        """Fills template values for a lesson's main (non-activity) view."""
        template_values['page_type'] = UNIT_PAGE_TYPE
        template_values['lesson'] = lesson
        template_values['lesson_id'] = lesson.lesson_id
        template_values['back_button_url'] = left_nav_elements.get_url_by(
            'lesson', lesson.lesson_id, -1)
        template_values['next_button_url'] = left_nav_elements.get_url_by(
            'lesson', lesson.lesson_id, 1)
        template_values['page_type'] = 'unit'
        template_values['title'] = self._get_lesson_title(unit, lesson, student)
        if not lesson.manual_progress and is_progress_recorded(self, student):
            # Mark this page as accessed. This is done after setting the
            # student progress template value, so that the mark only shows up
            # after the student visits the page for the first time.
            self.get_course().get_progress_tracker().put_html_accessed(
                student, unit.unit_id, lesson.lesson_id)
class AssessmentHandler(BaseHandler):
    """Handler for generating assessment page."""

    # pylint: disable=too-many-statements
    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if not student:
            return

        # Extract incoming args, binding to self if needed.
        assessment_name = self.request.get('name')
        self.unit_id = assessment_name
        course = self.get_course()
        unit = course.find_unit_by_id(self.unit_id)
        if not unit:
            self.error(404)
            return

        # If assessment is used as a pre/post within a unit, go see that view.
        parent_unit = course.get_parent_unit(self.unit_id)
        if parent_unit:
            self.redirect('/unit?unit=%s&assessment=%s' %
                          (parent_unit.unit_id, self.unit_id))
            return

        # If the assessment is not currently available, and the user does not
        # have the permission to see drafts redirect to the main page.
        if (not unit.now_available and
            not courses_module.courses.can_see_drafts(self.app_context)):
            self.redirect('/')
            return

        self.template_value['main_content'] = (
            self.get_assessment_content(student, course, unit, as_lesson=False))
        self.template_value['assessment_name'] = assessment_name
        self.template_value['unit_id'] = self.unit_id
        self.template_value['navbar'] = {'course': True}
        self.render('assessment_page.html')

    def get_assessment_content(self, student, course, unit, as_lesson):
        """Renders the assessment body HTML for standalone or in-unit view.

        Args:
            student: the current Student (may be a transient student).
            course: the current Course.
            unit: the assessment unit to render.
            as_lesson: True when the assessment is embedded in a unit page.

        Returns:
            Rendered HTML for the assessment body.
        """
        model_version = course.get_assessment_model_version(unit)
        assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS
        self.template_value['model_version'] = model_version

        # Bind the version-specific view-configuration helpers once, so the
        # rest of the method is version-agnostic.
        if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
            configure_readonly_view = self.configure_readonly_view_1_4
            configure_active_view = self.configure_active_view_1_4
            get_review_received = self.get_review_received_1_4
        elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:
            configure_readonly_view = self.configure_readonly_view_1_5
            configure_active_view = self.configure_active_view_1_5
            get_review_received = self.get_review_received_1_5
        else:
            raise ValueError('Bad assessment model version: %s' % model_version)

        self.template_value['unit_id'] = unit.unit_id
        self.template_value['as_lesson'] = as_lesson
        self.template_value['assessment_title'] = unit.title
        self.template_value['assessment_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('assessment-post'))
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))
        self.template_value['grader'] = unit.workflow.get_grader()

        readonly_view = False
        due_date_exceeded = False
        submission_contents = None
        review_steps_for = []

        # A past submission due date forces the read-only view.
        submission_due_date = unit.workflow.get_submission_due_date()
        if submission_due_date:
            self.template_value['submission_due_date'] = (
                submission_due_date.strftime(HUMAN_READABLE_DATETIME_FORMAT))
            time_now = datetime.datetime.now()
            if time_now > submission_due_date:
                readonly_view = True
                due_date_exceeded = True
                self.template_value['due_date_exceeded'] = True

        if course.needs_human_grader(unit) and not student.is_transient:
            self.template_value['matcher'] = unit.workflow.get_matcher()
            rp = course.get_reviews_processor()
            review_steps_by = rp.get_review_steps_by(
                unit.unit_id, student.get_key())

            # Determine if the student can see others' reviews of his/her work.
            if (ReviewUtils.has_completed_enough_reviews(
                    review_steps_by, unit.workflow.get_review_min_count())):
                submission_and_review_steps = (
                    rp.get_submission_and_review_steps(
                        unit.unit_id, student.get_key()))
                if submission_and_review_steps:
                    submission_contents = submission_and_review_steps[0]
                    review_steps_for = submission_and_review_steps[1]

                # Only completed, non-removed steps that actually carry a
                # review are shown to the student.
                review_keys_for_student = []
                for review_step in review_steps_for:
                    can_show_review = (
                        review_step.state == domain.REVIEW_STATE_COMPLETED
                        and not review_step.removed
                        and review_step.review_key
                    )
                    if can_show_review:
                        review_keys_for_student.append(review_step.review_key)
                reviews_for_student = rp.get_reviews_by_keys(
                    unit.unit_id, review_keys_for_student)
                self.template_value['reviews_received'] = [get_review_received(
                    unit, review) for review in reviews_for_student]
        else:
            # NOTE(review): a transient student viewing a human-graded
            # assessment also reaches this branch; presumably
            # student.get_key() tolerates TransientStudent -- confirm.
            submission_contents = student_work.Submission.get_contents(
                unit.unit_id, student.get_key())

        # Determine whether to show the assessment in readonly mode.
        if submission_contents or due_date_exceeded:
            readonly_view = True
            configure_readonly_view(unit, submission_contents)

        if not readonly_view:
            if not student.is_transient:
                submission_contents = student_work.Submission.get_contents(
                    unit.unit_id, student.get_key())
            configure_active_view(unit, submission_contents)

        return self.render_template_to_html(
            self.template_value, 'assessment.html')

    def configure_readonly_view_1_4(self, unit, submission_contents):
        """Sets up the read-only 1.4 assessment view from a submission."""
        self.template_value['readonly_student_assessment'] = (
            create_readonly_assessment_params(
                self.get_course().get_assessment_content(unit),
                StudentWorkUtils.get_answer_list(submission_contents)))

    def configure_readonly_view_1_5(self, unit, submission_contents):
        """Sets up the read-only 1.5 assessment view from a submission."""
        self.template_value['readonly_student_assessment'] = True
        self.template_value['html_content'] = unit.html_content
        self.template_value['html_saved_answers'] = transforms.dumps(
            submission_contents)

    def configure_active_view_1_4(self, unit, submission_contents):
        """Sets up the editable 1.4 assessment view."""
        self.template_value['assessment_script_src'] = (
            self.get_course().get_assessment_filename(unit.unit_id))
        if submission_contents:
            # If a previous submission exists, reinstate it.
            self.template_value['saved_answers'] = transforms.dumps(
                StudentWorkUtils.get_answer_list(submission_contents))

    def configure_active_view_1_5(self, unit, submission_contents):
        """Sets up the editable 1.5 assessment view."""
        self.template_value['html_content'] = unit.html_content
        self.template_value['html_check_answers'] = unit.html_check_answers
        if submission_contents:
            # If a previous submission exists, reinstate it.
            self.template_value['html_saved_answers'] = transforms.dumps(
                submission_contents)

    def get_review_received_1_4(self, unit, review):
        """Returns template params for one review received (1.4 model)."""
        return create_readonly_assessment_params(
            self.get_course().get_review_content(unit),
            StudentWorkUtils.get_answer_list(review))

    def get_review_received_1_5(self, unit, review):
        """Returns template params for one review received (1.5 model)."""
        return {
            'content': unit.html_review_form,
            'saved_answers': transforms.dumps(review)
        }
class ReviewDashboardHandler(BaseHandler):
    """Handler for generating the index of reviews that a student has to do."""

    def _populate_template(self, course, unit, review_steps):
        """Adds variables to the template for the review dashboard."""
        self.template_value['assessment_name'] = unit.title
        self.template_value['unit_id'] = unit.unit_id

        # Link back to the enclosing unit when the assessment is embedded
        # in one; otherwise to the standalone assessment page.
        parent_unit = course.get_parent_unit(unit.unit_id)
        if parent_unit is not None:
            self.template_value['back_link'] = 'unit?unit=%s&assessment=%s' % (
                parent_unit.unit_id, unit.unit_id)
        else:
            self.template_value['back_link'] = (
                'assessment?name=%s' % unit.unit_id)

        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))
        self.template_value['review_dashboard_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('review-dashboard-post'))
        self.template_value['REVIEW_STATE_COMPLETED'] = (
            domain.REVIEW_STATE_COMPLETED)
        self.template_value['review_steps'] = review_steps
        self.template_value['review_min_count'] = (
            unit.workflow.get_review_min_count())

        review_due_date = unit.workflow.get_review_due_date()
        if review_due_date:
            self.template_value['review_due_date'] = review_due_date.strftime(
                HUMAN_READABLE_DATETIME_FORMAT)

        # NOTE(review): when no review due date is configured, this compares
        # a datetime against None; under Python 2 that evaluates False (None
        # orders before everything) -- confirm this fallback is intended.
        time_now = datetime.datetime.now()
        self.template_value['due_date_exceeded'] = (time_now > review_due_date)

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        course = self.get_course()
        rp = course.get_reviews_processor()
        unit, _ = extract_unit_and_lesson(self)
        if not unit:
            self.error(404)
            return

        self.template_value['navbar'] = {'course': True}
        # Only human-graded assessments have a review dashboard.
        if not course.needs_human_grader(unit):
            self.error(404)
            return

        # Check that the student has submitted the corresponding assignment.
        if not rp.does_submission_exist(unit.unit_id, student.get_key()):
            self.template_value['error_code'] = (
                'cannot_review_before_submitting_assignment')
            self.render('error.html')
            return

        review_steps = rp.get_review_steps_by(unit.unit_id, student.get_key())
        self._populate_template(course, unit, review_steps)
        required_review_count = unit.workflow.get_review_min_count()

        # The student can request a new submission if:
        # - all his/her current reviews are in Draft/Completed state, and
        # - he/she is not in the state where the required number of reviews
        #   has already been requested, but not all of these are completed.
        self.template_value['can_request_new_review'] = (
            len(review_steps) < required_review_count or
            ReviewUtils.has_completed_all_assigned_reviews(review_steps)
        )
        self.render('review_dashboard.html')

    def post(self):
        """Allows a reviewer to request a new review."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        if not self.assert_xsrf_token_or_fail(
                self.request, 'review-dashboard-post'):
            return

        course = self.get_course()
        unit, unused_lesson = extract_unit_and_lesson(self)
        if not unit:
            self.error(404)
            return

        rp = course.get_reviews_processor()
        review_steps = rp.get_review_steps_by(unit.unit_id, student.get_key())
        self.template_value['navbar'] = {'course': True}
        if not course.needs_human_grader(unit):
            self.error(404)
            return

        # Check that the student has submitted the corresponding assignment.
        if not rp.does_submission_exist(unit.unit_id, student.get_key()):
            self.template_value['error_code'] = (
                'cannot_review_before_submitting_assignment')
            self.render('error.html')
            return

        # Check that the review due date has not passed.
        # NOTE(review): when no review due date is configured, this compares
        # a datetime against None; under Python 2 that evaluates False --
        # confirm this fallback is intended.
        time_now = datetime.datetime.now()
        review_due_date = unit.workflow.get_review_due_date()
        if time_now > review_due_date:
            self.template_value['error_code'] = (
                'cannot_request_review_after_deadline')
            self.render('error.html')
            return

        # Check that the student can request a new review.
        review_min_count = unit.workflow.get_review_min_count()
        can_request_new_review = (
            len(review_steps) < review_min_count or
            ReviewUtils.has_completed_all_assigned_reviews(review_steps))
        if not can_request_new_review:
            self.template_value['review_min_count'] = review_min_count
            self.template_value['error_code'] = 'must_complete_more_reviews'
            self.render('error.html')
            return

        # Set the failure-case template value up front: on success we
        # redirect away, so it is only rendered via the except path below.
        self.template_value['no_submissions_available'] = True
        try:
            review_step_key = rp.get_new_review(unit.unit_id, student.get_key())
            redirect_params = {
                'key': review_step_key,
                'unit': unit.unit_id,
            }
            self.redirect('/review?%s' % urllib.urlencode(redirect_params))
        except Exception:  # pylint: disable=broad-except
            # No review could be assigned; re-render the dashboard with the
            # 'no_submissions_available' flag set above.
            review_steps = rp.get_review_steps_by(
                unit.unit_id, student.get_key())
            self._populate_template(course, unit, review_steps)
            self.render('review_dashboard.html')
class ReviewHandler(BaseHandler):
    """Handler for generating the submission page for individual reviews."""

    # pylint: disable=too-many-statements
    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        course = self.get_course()
        rp = course.get_reviews_processor()
        unit, unused_lesson = extract_unit_and_lesson(self)
        if not course.needs_human_grader(unit):
            self.error(404)
            return

        review_step_key = self.request.get('key')
        if not unit or not review_step_key:
            self.error(404)
            return

        # Any malformed/unknown key yields a 404 rather than a stack trace.
        try:
            review_step_key = db.Key(encoded=review_step_key)
            review_step = rp.get_review_steps_by_keys(
                unit.unit_id, [review_step_key])[0]
        except Exception:  # pylint: disable=broad-except
            self.error(404)
            return
        if not review_step:
            self.error(404)
            return

        # Check that the student is allowed to review this submission.
        if not student.has_same_key_as(review_step.reviewer_key):
            self.error(404)
            return

        model_version = course.get_assessment_model_version(unit)
        assert model_version in courses.SUPPORTED_ASSESSMENT_MODEL_VERSIONS
        self.template_value['model_version'] = model_version

        # Bind the version-specific view-configuration helpers once.
        if model_version == courses.ASSESSMENT_MODEL_VERSION_1_4:
            configure_assessment_view = self.configure_assessment_view_1_4
            configure_readonly_review = self.configure_readonly_review_1_4
            configure_active_review = self.configure_active_review_1_4
        elif model_version == courses.ASSESSMENT_MODEL_VERSION_1_5:
            configure_assessment_view = self.configure_assessment_view_1_5
            configure_readonly_review = self.configure_readonly_review_1_5
            configure_active_review = self.configure_active_review_1_5
        else:
            raise ValueError('Bad assessment model version: %s' % model_version)

        self.template_value['navbar'] = {'course': True}
        self.template_value['unit_id'] = unit.unit_id
        self.template_value['key'] = review_step_key

        submission_key = review_step.submission_key
        submission_contents = student_work.Submission.get_contents_by_key(
            submission_key)

        configure_assessment_view(unit, submission_contents)

        review_due_date = unit.workflow.get_review_due_date()
        if review_due_date:
            self.template_value['review_due_date'] = review_due_date.strftime(
                HUMAN_READABLE_DATETIME_FORMAT)

        review_key = review_step.review_key
        rev = rp.get_reviews_by_keys(
            unit.unit_id, [review_key])[0] if review_key else None

        # NOTE(review): when no review due date is set, the comparisons below
        # involve None; under Python 2 they evaluate False (None orders
        # before datetimes) -- confirm this fallback is intended.
        time_now = datetime.datetime.now()
        show_readonly_review = (
            review_step.state == domain.REVIEW_STATE_COMPLETED or
            time_now > review_due_date)
        self.template_value['due_date_exceeded'] = (time_now > review_due_date)

        if show_readonly_review:
            configure_readonly_review(unit, rev)
        else:
            # Populate the editable review form.
            configure_active_review(unit, rev)

        self.template_value['assessment_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('review-post'))
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))

        self.render('review.html')

    def configure_assessment_view_1_4(self, unit, submission_contents):
        """Shows the reviewee's submission read-only (1.4 model)."""
        readonly_student_assessment = create_readonly_assessment_params(
            self.get_course().get_assessment_content(unit),
            StudentWorkUtils.get_answer_list(submission_contents))
        self.template_value[
            'readonly_student_assessment'] = readonly_student_assessment

    def configure_assessment_view_1_5(self, unit, submission_contents):
        """Shows the reviewee's submission read-only (1.5 model)."""
        self.template_value['html_review_content'] = unit.html_content
        self.template_value['html_reviewee_answers'] = transforms.dumps(
            submission_contents)

    def configure_readonly_review_1_4(self, unit, review_contents):
        """Shows the review itself read-only (1.4 model)."""
        readonly_review_form = create_readonly_assessment_params(
            self.get_course().get_review_content(unit),
            StudentWorkUtils.get_answer_list(review_contents))
        self.template_value['readonly_review_form'] = readonly_review_form

    def configure_readonly_review_1_5(self, unit, review_contents):
        """Shows the review itself read-only (1.5 model)."""
        self.template_value['readonly_review_form'] = True
        self.template_value['html_review_form'] = unit.html_review_form
        self.template_value['html_review_answers'] = transforms.dumps(
            review_contents)

    def configure_active_review_1_4(self, unit, review_contents):
        """Sets up an editable review form (1.4 model)."""
        self.template_value['assessment_script_src'] = (
            self.get_course().get_review_filename(unit.unit_id))
        saved_answers = (
            StudentWorkUtils.get_answer_list(review_contents)
            if review_contents else [])
        self.template_value['saved_answers'] = transforms.dumps(saved_answers)

    def configure_active_review_1_5(self, unit, review_contents):
        """Sets up an editable review form (1.5 model)."""
        self.template_value['html_review_form'] = unit.html_review_form
        self.template_value['html_review_answers'] = transforms.dumps(
            review_contents)

    def post(self):
        """Handles POST requests, when a reviewer submits a review."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'review-post'):
            return

        course = self.get_course()
        rp = course.get_reviews_processor()

        unit_id = self.request.get('unit_id')
        # NOTE(review): resolves the unit via self.find_unit_by_id(), unlike
        # get() which uses course.find_unit_by_id() -- presumably equivalent;
        # confirm.
        unit = self.find_unit_by_id(unit_id)
        if not unit or not course.needs_human_grader(unit):
            self.error(404)
            return

        review_step_key = self.request.get('key')
        if not review_step_key:
            self.error(404)
            return

        try:
            review_step_key = db.Key(encoded=review_step_key)
            review_step = rp.get_review_steps_by_keys(
                unit.unit_id, [review_step_key])[0]
        except Exception:  # pylint: disable=broad-except
            self.error(404)
            return

        # Check that the student is allowed to review this submission.
        if not student.has_same_key_as(review_step.reviewer_key):
            self.error(404)
            return

        self.template_value['navbar'] = {'course': True}
        self.template_value['unit_id'] = unit.unit_id

        # Check that the review due date has not passed.
        time_now = datetime.datetime.now()
        review_due_date = unit.workflow.get_review_due_date()
        if time_now > review_due_date:
            self.template_value['time_now'] = time_now.strftime(
                HUMAN_READABLE_DATETIME_FORMAT)
            self.template_value['review_due_date'] = (
                review_due_date.strftime(HUMAN_READABLE_DATETIME_FORMAT))
            self.template_value['error_code'] = 'review_deadline_exceeded'
            self.render('error.html')
            return

        mark_completed = (self.request.get('is_draft') == 'false')
        self.template_value['is_draft'] = (not mark_completed)

        review_payload = self.request.get('answers')
        review_payload = transforms.loads(
            review_payload) if review_payload else []
        try:
            rp.write_review(
                unit.unit_id, review_step_key, review_payload, mark_completed)
            course.update_final_grades(student)
        except domain.TransitionError:
            # The review was already completed; it cannot be re-submitted.
            self.template_value['error_code'] = 'review_already_submitted'
            self.render('error.html')
            return

        self.render('review_confirmation.html')
class EventsRESTHandler(BaseRESTHandler):
    """Provides REST API for an Event."""

    def get(self):
        """Returns a 404 error; this handler should not be GET-accessible."""
        self.error(404)
        return

    def _add_request_facts(self, payload_json):
        """Adds request-derived location and user-agent facts to a payload.

        Args:
            payload_json: JSON-encoded event payload posted by the client.

        Returns:
            JSON-encoded payload augmented with a 'loc' sub-dict (locale and
            App Engine geolocation headers) and, when present, 'user_agent'.
        """
        payload_dict = transforms.loads(payload_json)
        if 'loc' not in payload_dict:
            payload_dict['loc'] = {}
        loc = payload_dict['loc']
        loc['locale'] = self.get_locale_for(self.request, self.app_context)
        loc['language'] = self.request.headers.get('Accept-Language')
        loc['country'] = self.request.headers.get('X-AppEngine-Country')
        loc['region'] = self.request.headers.get('X-AppEngine-Region')
        loc['city'] = self.request.headers.get('X-AppEngine-City')
        lat_long = self.request.headers.get('X-AppEngine-CityLatLong')
        if lat_long:
            latitude, longitude = lat_long.split(',')
            loc['lat'] = float(latitude)
            loc['long'] = float(longitude)
        user_agent = self.request.headers.get('User-Agent')
        if user_agent:
            payload_dict['user_agent'] = user_agent
        payload_json = transforms.dumps(payload_dict)
        # Remove the XSSI guard prefix, if present. Bug fix: the previous
        # code used str.lstrip(JSON_XSSI_PREFIX), which strips a *set of
        # characters* rather than a prefix, and only removed exactly the
        # prefix by accident (no valid JSON text starts with a character
        # from the prefix). Use an explicit prefix check instead.
        xssi_prefix = models.transforms.JSON_XSSI_PREFIX
        if payload_json.startswith(xssi_prefix):
            payload_json = payload_json[len(xssi_prefix):]
        return payload_json

    def post(self):
        """Receives event and puts it into datastore."""
        COURSE_EVENTS_RECEIVED.inc()

        # Bail out cheaply when no event category is configured to persist.
        can = (
            CAN_PERSIST_ACTIVITY_EVENTS.value or
            CAN_PERSIST_PAGE_EVENTS.value or
            CAN_PERSIST_TAG_EVENTS.value)
        if not can:
            return

        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, 'event-post', {}):
            return

        user = self.get_user()
        if not user:
            return

        source = request.get('source')
        payload_json = request.get('payload')
        payload_json = self._add_request_facts(payload_json)
        models.EventEntity.record(source, user, payload_json)
        COURSE_EVENTS_RECORDED.inc()

        self.process_event(user, source, payload_json)

    def process_event(self, user, source, payload_json):
        """Processes an event after it has been recorded in the event stream.

        Depending on the event source, marks the corresponding block,
        component, or lesson HTML as completed in the progress tracker.
        """
        student = models.Student.get_enrolled_student_by_email(user.email())
        if not student:
            return
        payload = transforms.loads(payload_json)
        if 'location' not in payload:
            return
        source_url = payload['location']

        if source in TAGS_THAT_TRIGGER_BLOCK_COMPLETION:
            unit_id, lesson_id = get_unit_and_lesson_id_from_url(
                self, source_url)
            if unit_id is not None and lesson_id is not None:
                self.get_course().get_progress_tracker().put_block_completed(
                    student, unit_id, lesson_id, payload['index'])
        elif source in TAGS_THAT_TRIGGER_COMPONENT_COMPLETION:
            unit_id, lesson_id = get_unit_and_lesson_id_from_url(
                self, source_url)
            cpt_id = payload['instanceid']
            if (unit_id is not None and lesson_id is not None and
                cpt_id is not None):
                self.get_course().get_progress_tracker(
                    ).put_component_completed(
                        student, unit_id, lesson_id, cpt_id)
        elif source in TAGS_THAT_TRIGGER_HTML_COMPLETION:
            # Records progress for scored lessons.
            unit_id, lesson_id = get_unit_and_lesson_id_from_url(
                self, source_url)
            course = self.get_course()
            unit = course.find_unit_by_id(unit_id)
            lesson = course.find_lesson_by_id(unit, lesson_id)
            # NOTE(review): lesson may be None when the URL does not resolve;
            # lesson.manual_progress below would then raise -- confirm these
            # events always carry a resolvable lesson URL.
            if (unit_id is not None and
                lesson_id is not None and
                not lesson.manual_progress):
                self.get_course().get_progress_tracker().put_html_completed(
                    student, unit_id, lesson_id)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers that are not directly related to course content."""
__author__ = 'Saifu Angto (saifu@google.com)'
import datetime
import HTMLParser
import os
import re
import urllib
import urlparse
import jinja2
import sites
import webapp2
import appengine_config
from common import jinja_utils
from common import locales
from common import resource
from common import safe_dom
from common import schema_fields
from common import tags
from common import utils as common_utils
from common.crypto import XsrfTokenManager
from models import courses
from models import resources_display
from models import models
from models import transforms
from models.config import ConfigProperty
from models.courses import Course
from models.models import Student
from models.models import StudentProfileDAO
from models.models import TransientStudent
from models.roles import Roles
from modules import courses as courses_module
from google.appengine.api import users
# The name of the template dict key that stores a course's base location.
COURSE_BASE_KEY = 'gcb_course_base'

# The name of the template dict key that stores data from course.yaml.
COURSE_INFO_KEY = 'course_info'

# Name of the cookie used to store locale preferences for users who are not
# in session.
GUEST_LOCALE_COOKIE = 'cb-user-locale'
GUEST_LOCALE_COOKIE_MAX_AGE_SEC = 48 * 60 * 60  # 48 hours

# Single shared TransientStudent instance.
TRANSIENT_STUDENT = TransientStudent()

# Whether to output debug info into the page.
CAN_PUT_DEBUG_INFO_INTO_PAGES = ConfigProperty(
    'gcb_can_put_debug_info_into_pages', bool, (
        'Whether or not to put debugging information into the web pages. '
        'This may be useful for debugging purposes if you develop custom '
        'Course Builder features or extensions.'), False)

# Whether to record page load/unload events in a database.
CAN_PERSIST_PAGE_EVENTS = ConfigProperty(
    'gcb_can_persist_page_events', bool, (
        'Whether or not to record student page interactions in a '
        'datastore. Without event recording, you cannot analyze student '
        'page interactions. On the other hand, no event recording reduces '
        'the number of datastore operations and minimizes the use of Google '
        'App Engine quota. Turn event recording on if you want to analyze '
        'this data.'),
    False)

# Whether to record tag events in a database.
CAN_PERSIST_TAG_EVENTS = ConfigProperty(
    'gcb_can_persist_tag_events', bool, (
        'Whether or not to record student tag interactions in a '
        'datastore. Without event recording, you cannot analyze student '
        'tag interactions. On the other hand, no event recording reduces '
        'the number of datastore operations and minimizes the use of Google '
        'App Engine quota. Turn event recording on if you want to analyze '
        'this data.'),
    False)

# Whether to record activity events in a database.
CAN_PERSIST_ACTIVITY_EVENTS = ConfigProperty(
    'gcb_can_persist_activity_events', bool, (
        'Whether or not to record student activity interactions in a '
        'datastore. Without event recording, you cannot analyze student '
        'activity interactions. On the other hand, no event recording reduces '
        'the number of datastore operations and minimizes the use of Google '
        'App Engine quota. Turn event recording on if you want to analyze '
        'this data.'),
    False)

# Date format string for displaying datetimes in UTC.
# Example: 2013-03-21, 13:00 UTC
HUMAN_READABLE_DATETIME_FORMAT = '%Y-%m-%d, %H:%M UTC'

# Date format string for displaying dates. Example: 2013-03-21
HUMAN_READABLE_DATE_FORMAT = '%Y-%m-%d'

# Time format string for displaying times. Example: 01:16:40 UTC.
HUMAN_READABLE_TIME_FORMAT = '%H:%M:%S UTC'
class PageInitializer(object):
    """Abstract class that defines an interface to initialize page headers."""

    @classmethod
    def initialize(cls, template_value):
        """Populates template_value for a page; subclasses must override."""
        raise NotImplementedError
class DefaultPageInitializer(PageInitializer):
    """Implements default page initializer."""

    @classmethod
    def initialize(cls, template_value):
        # Intentionally a no-op: the default initializer adds nothing.
        pass
class PageInitializerService(object):
    """Installs the appropriate PageInitializer."""

    # The currently-installed initializer class; replaced via set().
    _page_initializer = DefaultPageInitializer

    @classmethod
    def get(cls):
        """Returns the currently-installed PageInitializer."""
        return cls._page_initializer

    @classmethod
    def set(cls, page_initializer):
        """Installs page_initializer as the initializer for all pages."""
        cls._page_initializer = page_initializer
class ReflectiveRequestHandler(object):
    """Uses reflection to handle custom get() and post() requests.

    Use this class as a mix-in with any webapp2.RequestHandler to allow request
    dispatching to multiple get() and post() methods based on the 'action'
    parameter.

    Open your existing webapp2.RequestHandler, add this class as a mix-in.
    Define the following class variables:

        default_action = 'list'
        get_actions = ['default_action', 'edit']
        post_actions = ['save']

    Add instance methods named get_list(self), get_edit(self), post_save(self).
    These methods will now be called automatically based on the 'action'
    GET/POST parameter.
    """

    def create_xsrf_token(self, action):
        """Creates an XSRF token bound to the named action."""
        return XsrfTokenManager.create_xsrf_token(action)

    def get(self):
        """Handles GET."""
        # An absent/empty action falls back to the handler's default.
        action = self.request.get('action') or self.default_action
        if action not in self.get_actions:
            self.error(404)
            return
        handler = getattr(self, 'get_%s' % action)
        if handler:
            return handler()
        self.error(404)

    def post(self):
        """Handles POST."""
        action = self.request.get('action')
        if not action or action not in self.post_actions:
            self.error(404)
            return
        handler = getattr(self, 'post_%s' % action)
        if not handler:
            self.error(404)
            return
        # Each POST request must have valid XSRF token.
        token = self.request.get('xsrf_token')
        if not XsrfTokenManager.is_xsrf_token_valid(token, action):
            self.error(403)
            return
        return handler()
class HtmlHooks(object):
# As of Q1, 2015, hook points moved from "where-ever in the course
# settings is convenient" to "Anywhere under the top-level 'html_hooks'
# item". Older courses may have these items still referenced from the
# root of the settings hierarchy, rather than under "html_hooks". Rather
# than modifying the course settings, we simply also look for these legacy
# items in the old locations.
BACKWARD_COMPATIBILITY_ITEMS = [
'base.before_head_tag_ends',
'base.after_body_tag_begins',
'base.after_navbar_begins',
'base.before_navbar_ends',
'base.after_top_content_ends',
'base.after_main_content_ends',
'base.before_body_tag_ends',
'unit.after_leftnav_begins',
'unit.before_leftnav_ends',
'unit.after_content_begins',
'unit.before_content_ends',
'preview.after_top_content_ends',
'preview.after_main_content_ends',
]
# We used to use colons to separate path components in hook names. Now
# that I18N is using colons to delimit key components, we need to pick
# a different separator. There may be old Jinja templates using the old
# naming style, so continue to permit it.
BACKWARD_COMPATIBILITY_SEPARATOR = ':'
# Name for the top-level course settings section now holding the hooks,
# all of the hooks, and nothing but the hooks.
HTML_HOOKS = 'html_hooks'
# Extension modules may be called back from HtmlHooks.__init__. In
# particular, I18N's mode of operation is to hook load functionality to
# replace strings with translated versions.
POST_LOAD_CALLBACKS = []
# Path component separator. Allows sub-structure within the html_hooks
# top-level dict.
SEPARATOR = '.'
def __init__(self, course, prefs=None):
if prefs is None:
prefs = models.StudentPreferencesDAO.load_or_create()
# Fetch all the hooks. Since these are coming from the course
# settings, getting them all is not too inefficient.
self.content = self.get_all(course)
# Call callbacks to let extension modules know we have text loaded,
# in case they need to modify, replace, or extend anything.
for callback in self.POST_LOAD_CALLBACKS:
callback(self.content)
# When the course admin sees hooks, we may need to add nonblank
# text so the admin can have a place to click to edit them.
self.show_admin_content = False
if (prefs and prefs.show_hooks and
Roles.is_course_admin(course.app_context)):
self.show_admin_content = True
if course.version == courses.CourseModel12.VERSION:
self.show_admin_content = False
if self.show_admin_content:
self.update_for_admin()
def update_for_admin(self):
"""Show HTML hooks with non-blank text if admin has edit pref set.
If we are displaying to a course admin, and the admin has enabled
a preference, we want to ensure that each HTML hook point has some
non-blank text in it. (Hooks often carry only scripts, or other
non-displaying tags). Having some actual text in the tag forces
browsers to give it a visible component on the page. Clicking on
this component permits the admin to edit the item.
"""
class VisibleHtmlParser(HTMLParser.HTMLParser):
def __init__(self, *args, **kwargs):
HTMLParser.HTMLParser.__init__(self, *args, **kwargs)
self._has_visible_content = False
def handle_starttag(self, unused_tag, unused_attrs):
# Not 100% guaranteed; e.g., <p> does not guarantee content,
# but <button> does -- even if the <button> does not contain
# data/entity/char. I don't want to spend a lot of logic
# looking for specific cases, and this behavior is enough.
self._has_visible_content = True
def handle_data(self, data):
if data.strip():
self._has_visible_content = True
def handle_entityref(self, unused_data):
self._has_visible_content = True
def handle_charref(self, unused_data):
self._has_visible_content = True
def has_visible_content(self):
return self._has_visible_content
def reset(self):
HTMLParser.HTMLParser.reset(self)
self._has_visible_content = False
parser = VisibleHtmlParser()
for key, value in self.content.iteritems():
parser.reset()
parser.feed(value)
parser.close()
if not parser.has_visible_content():
self.content[key] += key
@classmethod
def _get_content_from(cls, name, environ):
# Look up desired content chunk in course.yaml dict/sub-dict.
content = None
for part in name.split(cls.SEPARATOR):
if part in environ:
item = environ[part]
if isinstance(item, basestring):
content = item
else:
environ = item
return content
@classmethod
def get_content(cls, course, name):
environ = course.app_context.get_environ()
# Prefer getting hook content from html_hooks sub-dict within
# course settings.
content = cls._get_content_from(name, environ.get(cls.HTML_HOOKS, {}))
# For backward compatibility, fall back to looking in top level.
if content is None:
content = cls._get_content_from(name, environ)
return content
@classmethod
def get_all(cls, course):
    """Get all hook names and associated content.

    Returns:
        Dict mapping dotted hook name -> HTML content string.  Content
        found under the 'html_hooks' settings sub-dict overrides
        same-named legacy top-level entries.
    """
    ret = {}

    # Look through the backward-compatibility items.  These may not all
    # exist, but pick up whatever does already exist.
    environ = course.app_context.get_environ()
    for backward_compatibility_item in cls.BACKWARD_COMPATIBILITY_ITEMS:
        value = cls._get_content_from(backward_compatibility_item, environ)
        if value:
            ret[backward_compatibility_item] = value

    # Pick up hook values from the official location under 'html_hooks'
    # within course settings.  These can override backward-compatible
    # versions when both are present.
    def find_leaves(environ, parent_names, ret):
        # Recursively collect string leaves; dict keys along the path are
        # joined with the separator to form the dotted hook name.
        for name, value in environ.iteritems():
            if isinstance(value, basestring):
                full_name = cls.SEPARATOR.join(parent_names + [name])
                ret[full_name] = value
            elif isinstance(value, dict):
                find_leaves(value, parent_names + [name], ret)

    # NOTE(review): assumes the cls.HTML_HOOKS key always exists in the
    # settings dict (unlike get_content, which uses .get()) — confirm.
    find_leaves(environ[cls.HTML_HOOKS], [], ret)
    return ret
def insert(self, name):
    """Emits the named hook's HTML, wrapped for optional admin editing.

    Args:
        name: hook name; legacy separator characters are normalized to
            the current separator before lookup.

    Returns:
        jinja2.Markup safe for direct insertion into a template.
    """
    name = name.replace(self.BACKWARD_COMPATIBILITY_SEPARATOR,
                        self.SEPARATOR)
    content = self.content.get(name, '')

    # Add the content to the page in response to the hook call.
    hook_div = safe_dom.Element('div', className='gcb-html-hook',
                                id=re.sub('[^a-zA-Z-]', '-', name))
    hook_div.add_child(tags.html_to_safe_dom(content, self))

    # Mark up content to enable edit controls for course admins.
    if self.show_admin_content:
        hook_div.add_attribute(onclick='gcb_edit_hook_point("%s")' % name)
        hook_div.add_attribute(className='gcb-html-hook-edit')
    return jinja2.Markup(hook_div.sanitized)
class ResourceHtmlHook(resource.AbstractResourceHandler):
    """Provide a class to allow treating this resource type polymorphically."""

    TYPE = 'html_hook'

    # Keys used in the data dict describing a single hook.
    NAME = 'name'
    CONTENT = 'content'

    @classmethod
    def get_resource(cls, course, key):
        """Returns the data dict for the hook named by 'key'."""
        return cls.get_data_dict(course, key)

    @classmethod
    def get_resource_title(cls, rsrc):
        """Returns the display title (the hook's name) of a resource dict."""
        return rsrc[cls.NAME]

    @classmethod
    def get_schema(cls, unused_course, unused_key):
        """Builds the REST editor schema for a single HTML hook."""
        ret = schema_fields.FieldRegistry(
            'HTML Hooks',
            description='HTML fragments that can be inserted at arbitrary '
            'points in student-visible pages using the syntax: '
            ' {{ html_hooks.insert(\'name_of_hook_section\') }} ')
        ret.add_property(schema_fields.SchemaField(
            cls.NAME, 'Name', 'string', i18n=False))
        ret.add_property(schema_fields.SchemaField(
            cls.CONTENT, 'Content', 'html', editable=True,
            description='HTML content injected into page where hook '
            'is referenced.'))
        return ret

    @classmethod
    def to_data_dict(cls, key, content):
        """Packs a hook name and its content into the resource dict shape."""
        return {
            cls.NAME: key,
            cls.CONTENT: content,
        }

    @classmethod
    def get_data_dict(cls, course, key):
        """Loads the hook's content from course settings into a data dict."""
        return cls.to_data_dict(key, HtmlHooks.get_content(course, key))

    @classmethod
    def get_view_url(cls, rsrc):
        # Hooks have no stand-alone view page.
        return None

    @classmethod
    def get_edit_url(cls, key):
        """Returns the dashboard URL for editing the named hook."""
        return 'dashboard?%s' % urllib.urlencode({
            'action': 'edit_html_hook',
            'key': key
        })

    @classmethod
    def get_all(cls, course):
        """Returns key/value pairs of resource.Key -> <html-hook resource>"""
        ret = {}
        for name, content in HtmlHooks.get_all(course).iteritems():
            key = resource.Key(cls.TYPE, name, course)
            value = {
                cls.NAME: name,
                cls.CONTENT: content
            }
            ret[key] = value
        return ret
class ApplicationHandler(webapp2.RequestHandler):
    """A handler that is aware of the application context."""

    # Modules append callables here; each takes an app_context and returns a
    # list of extra links for the left/right side of the navbar.
    LEFT_LINKS = []
    RIGHT_LINKS = []

    # Modules append URLs here to include extra CSS/JS on every page.
    EXTRA_GLOBAL_CSS_URLS = []
    EXTRA_GLOBAL_JS_URLS = []

    @classmethod
    def is_absolute(cls, url):
        """Returns True when 'url' is an absolute (not relative) URL."""
        return sites.ApplicationContext.is_absolute_url(url)

    @classmethod
    def get_base_href(cls, handler):
        """Computes current course <base> href."""
        base = handler.app_context.get_slug()
        if not base.endswith('/'):
            base = '%s/' % base
        # For IE to work with the <base> tag, its href must be an absolute URL.
        if not sites.ApplicationContext.is_absolute_url(base):
            parts = urlparse.urlparse(handler.request.url)
            base = urlparse.urlunparse(
                (parts.scheme, parts.netloc, base, None, None, None))
        return base

    def render_template_to_html(self, template_values, template_file,
                                additional_dirs=None):
        """Renders a template to markup within a read-only memcache scope."""
        courses.Course.set_current(self.get_course())
        models.MemcacheManager.begin_readonly()
        try:
            template = self.get_template(template_file, additional_dirs)
            return jinja2.utils.Markup(
                template.render(template_values, autoescape=True))
        finally:
            # Always unwind the global course/memcache state, even on error.
            models.MemcacheManager.end_readonly()
            courses.Course.clear_current()

    def get_template(self, template_file, additional_dirs=None, prefs=None):
        """Subclasses must supply template lookup (see CourseHandler)."""
        raise NotImplementedError()

    @classmethod
    def canonicalize_url_for(cls, app_context, location):
        """Adds the current namespace URL prefix to the relative 'location'."""
        return app_context.canonicalize_url(location)

    def canonicalize_url(self, location):
        """Adds the course URL prefix when an app_context is available."""
        if hasattr(self, 'app_context'):
            return self.app_context.canonicalize_url(location)
        else:
            return location

    def redirect(self, location, normalize=True):
        """Redirects, canonicalizing 'location' unless normalize=False."""
        if normalize:
            location = self.canonicalize_url(location)
        super(ApplicationHandler, self).redirect(location)
class CourseHandler(ApplicationHandler):
    """Base handler that is aware of the current course."""

    def __init__(self, *args, **kwargs):
        super(CourseHandler, self).__init__(*args, **kwargs)
        # Course instance, created lazily by get_course().
        self.course = None
        # Values accumulated here are handed to the jinja2 template at
        # render time.
        self.template_value = {}

    def get_user(self):
        """Get the current user."""
        return users.get_current_user()

    def get_student(self):
        """Get the current student."""
        user = self.get_user()
        if user is None:
            return None
        return Student.get_by_email(user.email())

    def _pick_first_valid_locale_from_list(self, desired_locales):
        """Returns the first desired locale the course allows, else None.

        Matching is case-insensitive, but the caller-supplied spelling
        is what gets returned.
        """
        available_locales = self.app_context.get_allowed_locales()
        for lang in desired_locales:
            for available_locale in available_locales:
                if lang.lower() == available_locale.lower():
                    return lang
        return None

    def get_locale_for(self, request, app_context, student=None, prefs=None):
        """Returns a locale that should be used by this request.

        Checks, in precedence order: the 'hl' query parameter, the
        enrolled student's locale labels, saved user preferences, the
        guest-locale cookie, the Accept-Language header, and finally
        the course default locale.

        Args:
            request: the current request object.
            app_context: course application context.
            student: optional Student; loaded on demand when None.
            prefs: optional StudentPreferences; loaded on demand when None.

        Returns:
            A locale name string allowed by the course.

        NOTE(review): the body mixes the 'request'/'app_context'
        parameters with self.request/self.app_context; harmless when
        callers pass the handler's own objects, but worth confirming.
        """
        hl = request.get('hl')
        if hl and hl in self.app_context.get_allowed_locales():
            return hl

        if self.get_user():
            # check if student has any locale labels assigned
            if student is None:
                student = self.get_student()
            if student and student.is_enrolled and not student.is_transient:
                student_label_ids = student.get_labels_of_type(
                    models.LabelDTO.LABEL_TYPE_LOCALE)
                if student_label_ids:
                    all_labels = models.LabelDAO.get_all_of_type(
                        models.LabelDTO.LABEL_TYPE_LOCALE)
                    student_locales = []
                    for label in all_labels:
                        if label.type != models.LabelDTO.LABEL_TYPE_LOCALE:
                            continue
                        if label.id in student_label_ids:
                            student_locales.append(label.title)
                    locale = self._pick_first_valid_locale_from_list(
                        student_locales)
                    if locale:
                        return locale

            # check if user preferences have been set
            if prefs is None:
                prefs = models.StudentPreferencesDAO.load_or_create()
            if prefs is not None and prefs.locale is not None:
                return prefs.locale

        # Guests (and users without any explicit choice) may have picked a
        # locale stored in a cookie; see StudentLocaleRESTHandler.
        locale_cookie = self.request.cookies.get(GUEST_LOCALE_COOKIE)
        if locale_cookie and (
                locale_cookie in self.app_context.get_allowed_locales()):
            return locale_cookie

        # check if accept language has been set
        accept_langs = request.headers.get('Accept-Language')
        locale = self._pick_first_valid_locale_from_list(
            [lang for lang, _ in locales.parse_accept_language(accept_langs)])
        if locale:
            return locale

        return app_context.default_locale

    def gettext(self, text):
        """Translates 'text' using the locale chosen for this request.

        Temporarily switches the global current locale and restores it
        afterwards, even on error.
        """
        old_locale = self.app_context.get_current_locale()
        try:
            new_locale = self.get_locale_for(self.request, self.app_context)
            self.app_context.set_current_locale(new_locale)
            return self.app_context.gettext(text)
        finally:
            self.app_context.set_current_locale(old_locale)

    def get_course(self):
        """Get current course."""
        if not self.course:
            self.course = Course(self)
        return self.course

    def get_track_matching_student(self, student):
        """Gets units whose labels match those on the student."""
        return self.get_course().get_track_matching_student(student)

    def get_progress_tracker(self):
        """Gets the progress tracker for the course."""
        return self.get_course().get_progress_tracker()

    def find_unit_by_id(self, unit_id):
        """Gets a unit with a specific id or fails with an exception."""
        return self.get_course().find_unit_by_id(unit_id)

    def get_units(self):
        """Gets all units in the course."""
        return self.get_course().get_units()

    def get_lessons(self, unit_id):
        """Gets all lessons (in order) in the specific course unit."""
        return self.get_course().get_lessons(unit_id)

    @classmethod
    def _cache_debug_info(cls, cache):
        """Formats one line per cache entry with its last-update time."""
        items = []
        for key, entry in cache.items.iteritems():
            updated_on = None
            if entry:
                updated_on = entry.updated_on()
            items.append('entry: %s, %s' % (key, updated_on))
        return items

    @classmethod
    def debug_info(cls):
        """Generates a debug info for this request."""
        # We only want to run these imports if this method is called; most of
        # the time it is not.  We also have circular import dependencies if we
        # were to put them at the top...
        from models import vfs
        from modules.i18n_dashboard import i18n_dashboard
        vfs_items = cls._cache_debug_info(
            vfs.ProcessScopedVfsCache.instance().cache)
        rb_items = cls._cache_debug_info(
            i18n_dashboard.ProcessScopedResourceBundleCache.instance().cache)
        return ''.join([
            '\nDebug Info: %s' % datetime.datetime.utcnow(),
            '\n\nServer Environment Variables: %s' % '\n'.join([
                'item: %s, %s' % (key, value)
                for key, value in os.environ.iteritems()]),
            '\n\nVfsCacheKeys:\n%s' % '\n'.join(vfs_items),
            '\n\nResourceBundlesCache:\n%s' % '\n'.join(rb_items),
        ])

    def init_template_values(self, environ, prefs=None):
        """Initializes template variables with common values."""
        self.template_value[COURSE_INFO_KEY] = environ
        self.template_value[
            'page_locale'] = self.app_context.get_current_locale()
        self.template_value['html_hooks'] = HtmlHooks(
            self.get_course(), prefs=prefs)
        self.template_value['is_course_admin'] = Roles.is_course_admin(
            self.app_context)
        self.template_value['can_see_drafts'] = (
            courses_module.courses.can_see_drafts(self.app_context))
        self.template_value[
            'is_read_write_course'] = self.app_context.fs.is_read_write()
        self.template_value['is_super_admin'] = Roles.is_super_admin()
        self.template_value[COURSE_BASE_KEY] = self.get_base_href(self)

        # Navbar links registered by modules; see LEFT_LINKS/RIGHT_LINKS.
        self.template_value['left_links'] = []
        for func in self.LEFT_LINKS:
            self.template_value['left_links'].extend(func(self.app_context))
        self.template_value['right_links'] = []
        for func in self.RIGHT_LINKS:
            self.template_value['right_links'].extend(func(self.app_context))

        if not prefs:
            prefs = models.StudentPreferencesDAO.load_or_create()
        self.template_value['student_preferences'] = prefs

        # Expose the raw jinja context to admins for template debugging,
        # never in production.
        if (Roles.is_course_admin(self.app_context) and
            not appengine_config.PRODUCTION_MODE and
            prefs and prefs.show_jinja_context):

            @jinja2.contextfunction
            def get_context(context):
                return context
            self.template_value['context'] = get_context

        if CAN_PUT_DEBUG_INFO_INTO_PAGES.value:
            self.template_value['debug_info'] = self.debug_info()

        self.template_value[
            'extra_global_css_urls'] = self.EXTRA_GLOBAL_CSS_URLS
        self.template_value[
            'extra_global_js_urls'] = self.EXTRA_GLOBAL_JS_URLS

        # Common template information for the locale picker (only shown for
        # user in session)
        can_student_change_locale = (
            self.get_course().get_course_setting('can_student_change_locale')
            or self.get_course().app_context.can_pick_all_locales())
        if can_student_change_locale:
            self.template_value['available_locales'] = [
                {
                    'name': locales.get_locale_display_name(loc),
                    'value': loc
                } for loc in self.app_context.get_allowed_locales()]
            self.template_value['locale_xsrf_token'] = (
                XsrfTokenManager.create_xsrf_token(
                    StudentLocaleRESTHandler.XSRF_TOKEN_NAME))
            self.template_value['selected_locale'] = self.get_locale_for(
                self.request, self.app_context, prefs=prefs)

    def get_template(self, template_file, additional_dirs=None, prefs=None):
        """Computes location of template files for the current namespace."""
        _p = self.app_context.get_environ()
        self.init_template_values(_p, prefs=prefs)
        template_environ = self.app_context.get_template_environ(
            self.app_context.get_current_locale(), additional_dirs)
        template_environ.filters[
            'gcb_tags'] = jinja_utils.get_gcb_tags_filter(self)
        # Title-display helpers are made available to every template.
        template_environ.globals.update({
            'display_unit_title': (
                lambda unit: resources_display.display_unit_title(
                    unit, self.app_context)),
            'display_short_unit_title': (
                lambda unit: resources_display.display_short_unit_title(
                    unit, self.app_context)),
            'display_lesson_title': (
                lambda unit, lesson: resources_display.display_lesson_title(
                    unit, lesson, self.app_context))})
        return template_environ.get_template(template_file)
class BaseHandler(CourseHandler):
    """Base handler."""

    def __init__(self, *args, **kwargs):
        super(BaseHandler, self).__init__(*args, **kwargs)
        # Saved global locale, restored in after_method().
        self._old_locale = None

    def before_method(self, verb, path):
        """Modify global locale value for the duration of this handler."""
        self._old_locale = self.app_context.get_current_locale()
        new_locale = self.get_locale_for(self.request, self.app_context)
        self.app_context.set_current_locale(new_locale)

    def after_method(self, verb, path):
        """Restore original global locale value."""
        self.app_context.set_current_locale(self._old_locale)

    def personalize_page_and_get_user(self):
        """If the user exists, add personalized fields to the navbar.

        Returns:
            The current users.User, or None when nobody is signed in.
        """
        user = self.get_user()
        PageInitializerService.get().initialize(self.template_value)

        if hasattr(self, 'app_context'):
            self.template_value['can_register'] = self.app_context.get_environ(
                )['reg_form']['can_register']

        if user:
            email = user.email()
            # Show only the local part of the address in the navbar.
            self.template_value['email_no_domain_name'] = (
                email[:email.find('@')] if '@' in email else email)
            self.template_value['email'] = email
            self.template_value['logoutUrl'] = (
                users.create_logout_url(self.request.uri))
            self.template_value['transient_student'] = False

            # configure page events
            self.template_value['record_tag_events'] = (
                CAN_PERSIST_TAG_EVENTS.value)
            self.template_value['record_page_events'] = (
                CAN_PERSIST_PAGE_EVENTS.value)
            self.template_value['record_events'] = (
                CAN_PERSIST_ACTIVITY_EVENTS.value)
            self.template_value['event_xsrf_token'] = (
                XsrfTokenManager.create_xsrf_token('event-post'))
        else:
            self.template_value['loginUrl'] = users.create_login_url(
                self.request.uri)
            self.template_value['transient_student'] = True
            return None

        return user

    def personalize_page_and_get_enrolled(
            self, supports_transient_student=False):
        """If the user is enrolled, add personalized fields to the navbar.

        Args:
            supports_transient_student: when True and the course is
                browsable, an unregistered visitor receives the shared
                TRANSIENT_STUDENT instead of being redirected away.

        Returns:
            An enrolled Student, TRANSIENT_STUDENT, or None after a
            redirect has been issued.
        """
        user = self.personalize_page_and_get_user()
        if user is None:
            student = TRANSIENT_STUDENT
        else:
            student = Student.get_enrolled_student_by_email(user.email())
            if not student:
                self.template_value['transient_student'] = True
                student = TRANSIENT_STUDENT

        if student.is_transient:
            if supports_transient_student and (
                    self.app_context.get_environ()['course']['browsable']):
                return TRANSIENT_STUDENT
            elif user is None:
                # Not signed in at all: ask for login first.
                self.redirect(
                    users.create_login_url(self.request.uri), normalize=False
                )
                return None
            else:
                # Signed in but not registered: send to the preview page.
                self.redirect('/preview')
                return None

        # Patch Student models which (for legacy reasons) do not have a user_id
        # attribute set.
        if not student.user_id:
            student.user_id = user.user_id()
            student.put()

        return student

    def assert_xsrf_token_or_fail(self, request, action):
        """Asserts the current request has proper XSRF token or fails."""
        token = request.get('xsrf_token')
        if not token or not XsrfTokenManager.is_xsrf_token_valid(token, action):
            self.error(403)
            return False
        return True

    @appengine_config.timeandlog('BaseHandler.render')
    def render(self, template_file, additional_dirs=None):
        """Renders a template."""
        prefs = models.StudentPreferencesDAO.load_or_create()

        courses.Course.set_current(self.get_course())
        models.MemcacheManager.begin_readonly()
        try:
            template = self.get_template(
                template_file, additional_dirs=additional_dirs, prefs=prefs)
            self.response.out.write(template.render(self.template_value))
        finally:
            models.MemcacheManager.end_readonly()
            courses.Course.clear_current()

        # If the page displayed successfully, save the location for registered
        # students so future visits to the course's base URL sends the student
        # to the most-recently-visited page.
        # TODO(psimakov): method called render() must not have mutations
        user = self.get_user()
        if user:
            student = models.Student.get_enrolled_student_by_email(
                user.email())
            if student:
                prefs.last_location = self.request.path_qs
                models.StudentPreferencesDAO.save(prefs)

    def get_redirect_location(self, student):
        """Returns the student's last-visited location for course-root hits.

        Only applies to enrolled (non-transient) students requesting the
        course base URL (with or without trailing slash) or explicitly
        passing use_last_location; returns None otherwise.
        """
        if (not student.is_transient and
            (self.request.path == self.app_context.get_slug() or
             self.request.path == self.app_context.get_slug() + '/' or
             self.request.get('use_last_location'))):  # happens on '/' page
            prefs = models.StudentPreferencesDAO.load_or_create()
            # Belt-and-suspenders: prevent infinite self-redirects
            if (prefs.last_location and
                prefs.last_location != self.request.path_qs):
                return prefs.last_location
        return None
class BaseRESTHandler(CourseHandler):
    """Base REST handler."""

    def __init__(self, *args, **kwargs):
        super(BaseRESTHandler, self).__init__(*args, **kwargs)

    def assert_xsrf_token_or_fail(self, token_dict, action, args_dict):
        """Asserts that current request has proper XSRF token or fails.

        Unlike the page-handler variant, a failure is reported to the
        client as a JSON 403 response rather than a bare HTTP error.
        """
        token = token_dict.get('xsrf_token')
        valid = bool(token) and XsrfTokenManager.is_xsrf_token_valid(
            token, action)
        if valid:
            return True
        transforms.send_json_response(
            self, 403,
            'Bad XSRF token. Please reload the page and try again',
            args_dict)
        return False

    def validation_error(self, message, key=None):
        """Deliver a validation message."""
        if not key:
            transforms.send_json_response(self, 412, message)
        else:
            transforms.send_json_response(
                self, 412, message, payload_dict={'key': key})
class PreviewHandler(BaseHandler):
    """Handler for viewing course preview."""

    def get(self):
        """Handles GET requests."""
        user = self.personalize_page_and_get_user()
        if user is None:
            student = TRANSIENT_STUDENT
        else:
            student = Student.get_enrolled_student_by_email(user.email())
            if not student:
                student = TRANSIENT_STUDENT

        # If the course is browsable, or the student is logged in and
        # registered, redirect to the main course page.
        if ((student and not student.is_transient) or
            self.app_context.get_environ()['course']['browsable']):
            self.redirect('/course')
            return

        self.template_value['transient_student'] = True
        self.template_value['can_register'] = self.app_context.get_environ(
            )['reg_form']['can_register']
        self.template_value['navbar'] = {'course': True}
        self.template_value['units'] = self.get_units()
        self.template_value['show_registration_page'] = True

        course = self.app_context.get_environ()['course']
        # Only claim media exist when settings provide a non-empty URL.
        self.template_value['video_exists'] = bool(
            'main_video' in course and
            'url' in course['main_video'] and
            course['main_video']['url'])
        self.template_value['image_exists'] = bool(
            'main_image' in course and
            'url' in course['main_image'] and
            course['main_image']['url'])

        if user:
            profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
            additional_registration_fields = self.app_context.get_environ(
                )['reg_form']['additional_registration_fields']
            # Users with a global profile and no extra registration
            # questions can register in one click; skip the form.
            if profile is not None and not additional_registration_fields:
                self.template_value['show_registration_page'] = False
                self.template_value['register_xsrf_token'] = (
                    XsrfTokenManager.create_xsrf_token('register-post'))
        self.render('preview.html')
class RegisterHandler(BaseHandler):
    """Handler for course registration."""

    def get(self):
        """Renders the registration form, or redirects when ineligible.

        Redirects to login when no user is in session, to the course
        page when already enrolled, and to a 'registration closed'
        anchor when the course is not accepting registrations.
        """
        user = self.personalize_page_and_get_user()
        if not user:
            self.redirect(
                users.create_login_url(self.request.uri), normalize=False)
            return

        student = Student.get_enrolled_student_by_email(user.email())
        if student:
            self.redirect('/course')
            return

        can_register = self.app_context.get_environ(
            )['reg_form']['can_register']
        if not can_register:
            self.redirect('/course#registration_closed')
            return

        # Pre-fill the name field from the global profile if available.
        self.template_value['current_name'] = ''
        profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
        if profile and profile.nick_name:
            self.template_value['current_name'] = profile.nick_name

        self.template_value['navbar'] = {}
        self.template_value['transient_student'] = True
        self.template_value['register_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('register-post'))
        self.render('register.html')

    def post(self):
        """Handles registration form submission.

        Validates the session, XSRF token and registration availability,
        creates the Student record, then redirects to the confirmation
        anchor on the course page.
        """
        user = self.personalize_page_and_get_user()
        if not user:
            self.redirect(
                users.create_login_url(self.request.uri), normalize=False)
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'register-post'):
            return

        can_register = self.app_context.get_environ(
            )['reg_form']['can_register']
        if not can_register:
            self.redirect('/course#registration_closed')
            return

        name = None
        if 'name_from_profile' in self.request.POST.keys():
            profile = StudentProfileDAO.get_profile_by_user_id(user.user_id())
            # Bug fix: a forged or stale form may request the profile name
            # when no global profile exists; previously this raised
            # AttributeError on None.  Fall back to the typed name instead.
            if profile:
                name = profile.nick_name
        if not name:
            name = self.request.get('form01')

        Student.add_new_student_for_current_user(
            name, transforms.dumps(self.request.POST.items()), self,
            labels=self.request.get('labels'))

        # Render registration confirmation page
        self.redirect('/course#registration_confirmation')
class ForumHandler(BaseHandler):
    """Handler for forum page."""

    def get(self):
        """Renders the forum page; transient (browsing) students allowed."""
        viewer = self.personalize_page_and_get_enrolled(
            supports_transient_student=True)
        if viewer:
            self.template_value['navbar'] = {'forum': True}
            self.render('forum.html')
class StudentProfileHandler(BaseHandler):
    """Handles the click to 'Progress' link in the nav bar."""

    # A list of functions which will provide extra rows in the Student Progress
    # table.  Each function will be passed the current handler, student, and
    # course object and should return a pair of strings; the first being the
    # title of the data and the second the value to display.
    EXTRA_STUDENT_DATA_PROVIDERS = []

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        track_labels = models.LabelDAO.get_all_of_type(
            models.LabelDTO.LABEL_TYPE_COURSE_TRACK)

        course = self.get_course()
        units = []
        for unit in course.get_units():
            # Don't show assessments that are part of units.
            if course.get_parent_unit(unit.unit_id):
                continue
            units.append({
                'unit_id': unit.unit_id,
                'title': unit.title,
                'labels': list(course.get_unit_track_labels(unit)),
                })

        # The shared profile's nick name wins over the course-local name.
        name = student.name
        profile = student.profile
        if profile:
            name = profile.nick_name
        student_labels = student.get_labels_of_type(
            models.LabelDTO.LABEL_TYPE_COURSE_TRACK)
        self.template_value['navbar'] = {'progress': True}
        self.template_value['student'] = student
        self.template_value['student_name'] = name
        self.template_value['date_enrolled'] = student.enrolled_on.strftime(
            HUMAN_READABLE_DATE_FORMAT)
        self.template_value['score_list'] = course.get_all_scores(student)
        self.template_value['overall_score'] = course.get_overall_score(student)
        self.template_value['student_edit_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('student-edit'))
        # Name editing is disabled when the shared profile owns the name.
        self.template_value['can_edit_name'] = (
            not models.CAN_SHARE_STUDENT_PROFILE.value)
        self.template_value['track_labels'] = track_labels
        self.template_value['student_labels'] = student_labels
        self.template_value['units'] = units
        self.template_value['track_env'] = transforms.dumps({
            'label_ids': [label.id for label in track_labels],
            'units': units
            })

        # Append any extra data which is provided by modules
        extra_student_data = []
        for data_provider in self.EXTRA_STUDENT_DATA_PROVIDERS:
            extra_student_data.append(data_provider(self, student, course))
        self.template_value['extra_student_data'] = extra_student_data

        self.render('student_profile.html')
class StudentEditStudentHandler(BaseHandler):
    """Handles edits to student records by students."""

    def post(self):
        """Renames the current student, then returns to the profile page."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return

        if not self.assert_xsrf_token_or_fail(self.request, 'student-edit'):
            return

        Student.rename_current(self.request.get('name'))

        self.redirect('/student/home')
class StudentSetTracksHandler(BaseHandler):
    """Handles submission of student tracks selections."""

    def post(self):
        """Replaces the student's track labels with the form's selection.

        Non-track labels on the student are preserved untouched.
        """
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'student-edit'):
            return

        track_ids = models.LabelDAO.get_set_of_ids_of_type(
            models.LabelDTO.LABEL_TYPE_COURSE_TRACK)

        # Only accept submitted labels that are genuine track labels.
        chosen_ids = set()
        for label_id in self.request.get_all('labels'):
            if label_id and int(label_id) in track_ids:
                chosen_ids.add(int(label_id))

        current_ids = set()
        for label_id in common_utils.text_to_list(student.labels):
            if label_id:
                current_ids.add(int(label_id))

        # Strip every existing track label, keep everything else, then
        # merge in the freshly-selected tracks from the form.
        updated_ids = (current_ids - track_ids) | chosen_ids

        models.Student.set_labels_for_current(
            common_utils.list_to_text(list(updated_ids)))

        self.redirect('/student/home')
class StudentUnenrollHandler(BaseHandler):
    """Handler for students to unenroll themselves."""

    def get(self):
        """Handles GET requests."""
        # Show a confirmation page before actually unenrolling.
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        self.template_value['student'] = student
        self.template_value['navbar'] = {}
        self.template_value['student_unenroll_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('student-unenroll'))
        self.render('unenroll_confirmation_check.html')

    def post(self):
        """Handles POST requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'student-unenroll'):
            return
        # Unenrollment only flips the enrollment flag; the record remains.
        Student.set_enrollment_status_for_current(False)
        self.template_value['navbar'] = {}
        self.template_value['transient_student'] = True
        self.render('unenroll_confirmation.html')
class StudentLocaleRESTHandler(BaseRESTHandler):
    """REST handler to manage student setting their preferred locale."""

    XSRF_TOKEN_NAME = 'locales'

    def post(self):
        """Saves the chosen locale in prefs or, for guests, in a cookie."""
        request = transforms.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(
                request, self.XSRF_TOKEN_NAME, {}):
            return

        selected = request['payload']['selected']
        if selected not in self.app_context.get_allowed_locales():
            # NOTE(review): 401 is used for a bad value where 400 would be
            # more conventional; clients may depend on this status code.
            transforms.send_json_response(self, 401, 'Bad locale')
            return

        prefs = models.StudentPreferencesDAO.load_or_create()
        if prefs:
            # Store locale in StudentPreferences for logged-in users
            prefs.locale = selected
            models.StudentPreferencesDAO.save(prefs)
        else:
            # Store locale in cookie for out-of-session users
            self.response.set_cookie(
                GUEST_LOCALE_COOKIE, selected,
                max_age=GUEST_LOCALE_COOKIE_MAX_AGE_SEC)
        transforms.send_json_response(self, 200, 'OK')
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enables hosting of multiple courses in one application instance.
We used to allow hosting of only one course in one Google App Engine instance.
Now we allow hosting of many courses simultaneously. To configure multiple
courses one must set an environment variable in app.yaml file, for example:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/coursea:/courses/a, course:/courseb:/courses/b'
...
This variable holds a ',' or newline separated list of course entries. Each
course entry has four ':' separated parts: the word 'course', the URL prefix,
the file system location for the site files, and (optionally) the name of the
course namespace. If the third part is empty, the course assets are stored in
a datastore instead of the file system.
The URL prefix specifies how the course URL will appear in the browser. In the
example above, the courses will be mapped to http://www.example.com[/coursea]
and http://www.example.com[/courseb].
The file system location of the files specifies which files to serve for the
course. For each course we expect three sub-folders: 'assets', 'views', and
'data'. The 'data' folder must contain the CSV files that define the course
layout, the 'assets' and 'views' should contain the course specific files and
jinja2 templates respectively. In the example above, the course files are
expected to be placed into folders '/courses/a' and '/courses/b' of your Google
App Engine installation respectively. If this value is absent a datastore is
used to store course assets, not the file system.
By default Course Builder handles static '/assets' files using a custom
handler. You may choose to handle '/assets' files of your course as 'static'
files using Google App Engine handler. You can do so by creating a new static
file handler entry in your app.yaml and placing it before our main course
handler.
If you have an existing course developed using Course Builder and do NOT want
to host multiple courses, there is nothing for you to do. A following default
rule is silently created for you:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/:/'
...
It sets the '/' as the base URL for the course, uses root folder of your Google
App Engine installation to look for course /assets/..., /data/..., and
/views/... and uses blank datastore and memcache namespace. All in all,
everything behaves just as it did in the prior version of Course Builder when
only one course was supported.
If you have an existing course developed using Course Builder and DO want to
hosting multiple courses here are the steps. First, define the courses
configuration environment variable as described above. Second, copy existing
'assets', 'data' and 'views' folders of your course into the new location, for
example '/courses/mycourse'.
If you have an existing course built on a previous version of Course Builder
and you now decided to use new URL prefix, which is not '/', you will need
to update your old course html template and JavaScript files. You typically
would have to make two modifications. First, replace all absolute URLs with
the relative URLs. For example, if you had <a href='/forum'>..</a>, you will
need to replace it with <a href='forum'>..</a>. Second, you need to add <base>
tag at the top of your course 'base.html' and 'base_registration.html' files,
like this:
...
<head>
<base href="{{ gcb_course_base }}" />
...
Current Course Builder release already has all these modifications.
Note, that each 'course' runs in a separate Google App Engine namespace. The
name of the namespace is derived from the course files location. In the example
above, the course files are stored in the folder '/courses/a', which will be
mapped
to the namespace name 'gcb-courses-a'. The namespaces can't contain '/', so we
replace them with '-' and prefix the namespace with the project abbreviation
'gcb'. Remember these namespace names, you will need to use them if/when
accessing server administration panel, viewing objects in the datastore, etc.
Don't move the files to another folder after your course starts as a new folder
name will create a new namespace name and old data will no longer be used. You
are free to rename the course URL prefix at any time. Once again, if you are
not hosting multiple courses, your course will run in a default namespace
(None).
Good luck!
"""
import logging
import mimetypes
import os
import posixpath
import re
import threading
import traceback
import urlparse
import zipfile
import utils
import webapp2
from webapp2_extras import i18n
import appengine_config
from common import caching
from common import safe_dom
from models import models
from models import transforms
from models.config import ConfigProperty
from models.config import ConfigPropertyEntity
from models.config import Registry
from models.counters import PerfCounter
from models.courses import Course
from models.roles import Roles
from models.vfs import AbstractFileSystem
from models.vfs import DatastoreBackedFileSystem
from models.vfs import LocalReadOnlyFileSystem
from modules.courses import courses as courses_module
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import zipserve
# base name for all course namespaces
GCB_BASE_COURSE_NAMESPACE = 'gcb-course'

# these folder and file names are reserved
GCB_ASSETS_FOLDER_NAME = os.path.normpath('/assets/')
GCB_VIEWS_FOLDER_NAME = os.path.normpath('/views/')
GCB_DATA_FOLDER_NAME = os.path.normpath('/data/')
GCB_CONFIG_FILENAME = os.path.normpath('/course.yaml')

# modules do have files that must be inheritable, like oeditor.html
GCB_MODULES_FOLDER_NAME = os.path.normpath('/modules/')

# Files in these folders are inheritable between file systems.
GCB_INHERITABLE_FOLDER_NAMES = [
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'css/'),
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'img/'),
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'lib/'),
    os.path.join(GCB_ASSETS_FOLDER_NAME, 'html/'),
    GCB_VIEWS_FOLDER_NAME,
    GCB_MODULES_FOLDER_NAME]

# supported site types; only 'course' rules are accepted at present
SITE_TYPE_COURSE = 'course'

# default 'Cache-Control' HTTP header for static files
DEFAULT_CACHE_CONTROL_MAX_AGE = 600
DEFAULT_CACHE_CONTROL_PUBLIC = 'public'

# default HTTP headers for dynamic responses; the expiry date is in the
# past so clients always revalidate
DEFAULT_EXPIRY_DATE = 'Mon, 01 Jan 1990 00:00:00 GMT'
DEFAULT_PRAGMA = 'no-cache'

# thread local storage for current request PATH_INFO
PATH_INFO_THREAD_LOCAL = threading.local()

# performance counters
STATIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-static',
    'A number of times request was served via static handler.')
DYNAMIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-dynamic',
    'A number of times request was served via dynamic handler.')
ZIP_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-zip',
    'A number of times request was served via zip handler.')
NO_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-none',
    'A number of times request was not matched to any handler.')
HTTP_BYTES_IN = PerfCounter(
    'gcb-sites-bytes-in',
    'A number of bytes received from clients by the handler.')
HTTP_BYTES_OUT = PerfCounter(
    'gcb-sites-bytes-out',
    'A number of bytes sent out from the handler to clients.')
HTTP_STATUS_200 = PerfCounter(
    'gcb-sites-http-20x',
    'A number of times HTTP status code 20x was returned.')
HTTP_STATUS_300 = PerfCounter(
    'gcb-sites-http-30x',
    'A number of times HTTP status code 30x was returned.')
HTTP_STATUS_400 = PerfCounter(
    'gcb-sites-http-40x',
    'A number of times HTTP status code 40x was returned.')
HTTP_STATUS_500 = PerfCounter(
    'gcb-sites-http-50x',
    'A number of times HTTP status code 50x was returned.')

# Maps an HTTP status code rounded down to the nearest hundred to the
# counter tracking that class of responses.
COUNTER_BY_HTTP_CODE = {
    200: HTTP_STATUS_200, 300: HTTP_STATUS_300, 400: HTTP_STATUS_400,
    500: HTTP_STATUS_500}
def count_stats(handler):
    """Records statistics about the request and the response.

    Increments the byte-count and HTTP-status performance counters for a
    completed request/response pair. Counting errors are logged and
    swallowed so statistics collection can never break request serving.

    Args:
        handler: a webapp2 request handler with .request and .response.
    """
    try:
        # Record request bytes in.
        if handler.request and handler.request.content_length:
            HTTP_BYTES_IN.inc(handler.request.content_length)
        # Record response HTTP status code.
        if handler.response and handler.response.status_int:
            # Use floor division so the result stays an int on both
            # Python 2 and 3.
            rounded_status_code = (handler.response.status_int // 100) * 100
            # Use .get(): direct indexing raised KeyError for unexpected
            # classes (e.g. 1xx), which skipped the error log below and
            # was silently swallowed by the broad except clause.
            counter = COUNTER_BY_HTTP_CODE.get(rounded_status_code)
            if not counter:
                logging.error(
                    'Unknown HTTP status code: %s.',
                    handler.response.status_int)
            else:
                counter.inc()
        # Record response bytes out.
        if handler.response and handler.response.content_length:
            HTTP_BYTES_OUT.inc(handler.response.content_length)
    except Exception as e:  # pylint: disable=broad-except
        logging.error(
            'Failed to count_stats(): %s\n%s', e, traceback.format_exc())
def _validate_appcontext_list(contexts, strict=False):
"""Validates a list of application contexts."""
# Check rule order is enforced. If we allowed any order and '/a' was before
# '/aa', the '/aa' would never match.
for i in range(len(contexts)):
for j in range(i + 1, len(contexts)):
above = contexts[i]
below = contexts[j]
if below.get_slug().startswith(above.get_slug()):
raise Exception(
'Please reorder course entries to have '
'\'%s\' before \'%s\'.' % (
below.get_slug(), above.get_slug()))
# Make sure '/' is mapped.
if strict:
is_root_mapped = False
for context in contexts:
if context.slug == '/':
is_root_mapped = True
break
if not is_root_mapped:
raise Exception(
'Please add an entry with \'/\' as course URL prefix.')
def path_join(base, path):
    """Joins 'base' and 'path' ('path' is interpreted as a relative path).

    This method is like os.path.join(), but 'path' is interpreted relatively.
    E.g., os.path.join('/a/b', '/c') yields '/c', but this function yields
    '/a/b/c'.

    Args:
        base: The base path.
        path: The path to append to base; this is treated as a relative path.

    Returns:
        The path obtaining by appending 'path' to 'base'.
    """
    if os.path.isabs(path):
        # Strip a Windows drive letter (if any), then the leading path
        # separator, so the remainder is relative to 'base'.
        path = os.path.splitdrive(path)[1][1:]
    return AbstractFileSystem.normpath(os.path.join(base, path))
def abspath(home_folder, filename):
    """Creates an absolute URL for a filename in a home folder."""
    relative = path_join(home_folder, filename)
    return path_join(appengine_config.BUNDLE_ROOT, relative)
def unprefix(path, prefix):
    """Remove the prefix from path. Append '/' if an empty string results."""
    if not path.startswith(prefix):
        raise Exception('Not prefixed.')
    if prefix == '/':
        # The root prefix leaves the path untouched.
        return path
    remainder = path[len(prefix):]
    return remainder if remainder else '/'
def set_static_resource_cache_control(handler):
    """Properly sets Cache-Control for a WebOb/webapp2 response."""
    cache_control = handler.response.cache_control
    # Drop any no-cache directive and allow shared caches for a while.
    cache_control.no_cache = None
    cache_control.public = DEFAULT_CACHE_CONTROL_PUBLIC
    cache_control.max_age = DEFAULT_CACHE_CONTROL_MAX_AGE
def set_default_response_headers(handler):
    """Sets the default headers for outgoing responses."""
    # Unit-test handlers may carry an empty response attribute; skip them.
    if not handler.response:
        return
    # Static assets control their own caching; only dynamic responses
    # produced by utils.ApplicationHandler get the no-cache defaults.
    if isinstance(handler, AssetHandler):
        return
    if not isinstance(handler, utils.ApplicationHandler):
        return
    response = handler.response
    response.cache_control.no_cache = True
    response.cache_control.must_revalidate = True
    response.expires = DEFAULT_EXPIRY_DATE
    response.pragma = DEFAULT_PRAGMA
def make_zip_handler(zipfilename):
    """Creates a handler that serves files from a zip file."""

    class CustomZipHandler(zipserve.ZipHandler):
        """Custom ZipHandler that properly controls caching."""

        def get(self, *args):
            """Serves one entry of the bound zip file over HTTP GET."""
            # Prefer a path passed explicitly; otherwise fall back to the
            # handler's translated path.
            name = args[0] if args else None
            if not name:
                name = self.path_translated
            # Entries inside a zip archive are stored relative, so strip
            # the leading slash (a bare '/' stays and 404s below).
            if name and len(name) > 1 and name.startswith('/'):
                name = name[1:]
            if not name:
                self.error(404)
                return
            ZIP_HANDLER_COUNT.inc()
            self.ServeFromZipFile(zipfilename, name)
            count_stats(self)

        def SetCachingHeaders(self):  # pylint: disable=C6409
            """Properly controls caching."""
            set_static_resource_cache_control(self)

    return CustomZipHandler
class CssComboZipHandler(zipserve.ZipHandler):
    """A handler which combines a files served from a zip file.

    The paths for the files within the zip file are presented
    as query parameters.
    """

    # Maps zip file name -> open ZipFile ('' caches a failed open).
    zipfile_cache = {}

    def get(self):
        raise NotImplementedError()

    def SetCachingHeaders(self):  # pylint: disable=C6409
        """Properly controls caching."""
        set_static_resource_cache_control(self)

    def serve_from_zip_file(self, zipfilename, static_file_handler):
        """Assemble the download by reading file from zip file.

        Args:
            zipfilename: the zip archive holding the requested files.
            static_file_handler: URL prefix used to absolutize relative
                url() references inside served CSS.
        """
        zipfile_object = self.zipfile_cache.get(zipfilename)
        if zipfile_object is None:
            try:
                zipfile_object = zipfile.ZipFile(zipfilename)
            except (IOError, RuntimeError, zipfile.BadZipfile) as err:
                # If the zipfile can't be opened, that's probably a
                # configuration error in the app, so it's logged as an error.
                logging.error('Can\'t open zipfile %s: %s', zipfilename, err)
                zipfile_object = ''  # Special value to cache negative results.
            self.zipfile_cache[zipfilename] = zipfile_object
        if not zipfile_object:
            self.error(404)
            return
        # Serve a single Content-Type only when every requested file
        # agrees on one; otherwise fall back to text/plain.
        all_content_types = set()
        for name in self.request.GET:
            all_content_types.add(mimetypes.guess_type(name))
        if len(all_content_types) == 1:
            content_type = all_content_types.pop()[0]
        else:
            content_type = 'text/plain'
        self.response.headers['Content-Type'] = content_type
        self.SetCachingHeaders()
        for name in self.request.GET:
            try:
                content = zipfile_object.read(name)
                if content_type == 'text/css':
                    content = self._fix_css_paths(
                        name, content, static_file_handler)
                self.response.out.write(content)
            except (KeyError, RuntimeError) as err:
                # Include the underlying error; it was previously dropped.
                logging.error(
                    'Not found %s in %s: %s', name, zipfilename, err)

    def _fix_css_paths(self, path, css, static_file_handler):
        """Transform relative url() settings in CSS to absolute.

        This is necessary because a url setting, e.g., url(foo.png), is
        interpreted as relative to the location of the CSS file. However
        in the case of a bundled CSS file, obtained from a URL such as
            http://place.com/cb/combo?a/b/c/foo.css
        the browser would believe that the location for foo.png was
            http://place.com/cb/foo.png
        and not
            http://place.com/cb/a/b/c/foo.png
        Thus we transform the url from
            url(foo.png)
        to
            url(/static_file_service/a/b/c/foo.png)

        Args:
            path: the path to the CSS file within the ZIP file
            css: the content of the CSS file
            static_file_handler: the base handler to serve the referenced file

        Returns:
            The CSS with all relative URIs rewritten to absolute URIs.
        """
        base = static_file_handler + posixpath.split(path)[0] + '/'
        css = css.decode('utf-8')
        # The previous pattern, r'url\(([^http|^https]\S+)\)', used a
        # character class: it excluded any URL whose first character was
        # one of 'h', 't', 'p', '|', 's' (so e.g. url(title.png) was never
        # rewritten) instead of the literal prefixes 'http'/'https'. Use a
        # negative lookahead to skip absolute http(s) URLs only.
        css = re.sub(r'url\((?!https?://)(\S+)\)', r'url(%s\1)' % base, css)
        return css
def make_css_combo_zip_handler(zipfilename, static_file_handler):
    """Builds a CssComboZipHandler subclass bound to one zip archive."""

    class CustomCssComboZipHandler(CssComboZipHandler):
        """Serves combined CSS entries from the configured zip file."""

        def get(self):
            self.serve_from_zip_file(zipfilename, static_file_handler)

    return CustomCssComboZipHandler
class AssetHandler(utils.BaseHandler):
    """Handles serving of static resources located on the file system."""

    def __init__(self, app_context, filename):
        super(AssetHandler, self).__init__()
        self.app_context = app_context
        self.filename = filename

    def get_mime_type(self, filename, default='application/octet-stream'):
        """Guesses the MIME type from a file name, with a fallback."""
        mime_type, _ = mimetypes.guess_type(filename)
        return default if mime_type is None else mime_type

    def _can_view(self, fs, stream):
        """Checks if current user can view stream."""
        if not fs.is_draft(stream):
            return True
        return Roles.is_course_admin(self.app_context)

    def get(self):
        """Handles GET requests."""
        models.MemcacheManager.begin_readonly()
        try:
            stream = self.app_context.fs.open(self.filename)
            if not stream:
                self.error(404)
                return
            if not self._can_view(self.app_context.fs, stream):
                self.error(403)
                return
            set_static_resource_cache_control(self)
            content_type = self.get_mime_type(self.filename)
            self.response.headers['Content-Type'] = content_type
            self.response.write(stream.read())
        finally:
            models.MemcacheManager.end_readonly()
class CourseIndex(object):
    """A list of all application contexts.

    Supports lookup by request path (via a trie of slug parts, or a linear
    scan fallback) and by datastore namespace name.
    """

    # When True, get_course_for_path() walks the slug-part trie;
    # when False it falls back to a linear scan over all contexts.
    CAN_USE_INDEXED_GETTER = True

    @appengine_config.timeandlog('CourseIndex.init', duration_only=True)
    def __init__(self, all_contexts):
        self._all_contexts = all_contexts
        # Maps namespace name -> ApplicationContext.
        self._namespace2app_context = {}
        # Trie keyed by slug part; the None key at a node stores the
        # context whose slug ends exactly at that node.
        self._slug_parts2app_context = {}
        self._reindex()

    @classmethod
    def _slug_to_parts(cls, path):
        """Split slug into parts; slug parts are '/' separated."""
        if path in ['/', '']:
            return None
        _parts = path.split('/')
        # Slugs begin with '/', so the first split element is empty.
        assert _parts[0] == ''
        _parts.pop(0)
        return _parts

    @classmethod
    def _validate_and_split_path_to_parts(cls, path):
        """Split path into parts; path parts are '/' separated.

        Returns:
            A (valid, parts) tuple; parts is None for the root path, and
            valid is False when the path does not start with '/'.
        """
        if path in ['/', '']:
            return True, None
        _parts = path.split('/')
        if _parts[0] != '':
            return False, None
        _parts.pop(0)
        return True, _parts

    def _update_slug_parts_index(self, app_context):
        """An index is a tree keyed by slug part."""
        _parts = self._slug_to_parts(app_context.get_slug())
        _parent = self._slug_parts2app_context
        while True:
            if not _parts:
                # Slug fully consumed: record the context at this node.
                _parent[None] = app_context
                break
            _part = _parts.pop(0)
            _node = _parent.get(_part)
            if not _node:
                _node = {_part: {}}
                _parent.update(_node)
            _parent = _parent[_part]

    def _get_course_for_path_via_index(self, path):
        """Walks the trie and returns the context for path, or None."""
        _result = None
        _valid, _parts = self._validate_and_split_path_to_parts(path)
        if not _valid:
            return None
        _parent = self._slug_parts2app_context
        while True:
            if not _parts:
                # Path fully consumed; use the context stored here, if any.
                if _parent:
                    _result = _parent.get(None)
                break
            _part = _parts.pop(0)
            _node = _parent.get(_part)
            if not _node:
                # No deeper match; fall back to the context at this node.
                if _parent:
                    _result = _parent.get(None)
                break
            _parent = _node
        if not _result:
            debug('No mapping for: %s' % path)
        return _result

    def _reindex(self):
        # Builds both the slug-part trie and the namespace map.
        for app_context in self._all_contexts:
            self._update_slug_parts_index(app_context)
            self._namespace2app_context[app_context.get_namespace_name()] = (
                app_context)

    def get_all_courses(self):
        """Returns the list of all application contexts."""
        return self._all_contexts

    def _get_course_for_path_linear(self, path):
        """Linear-scan fallback for get_course_for_path()."""
        for app_context in self._all_contexts:
            if (path == app_context.get_slug() or
                path.startswith('%s/' % app_context.get_slug()) or
                app_context.get_slug() == '/'):
                return app_context
        debug('No mapping for: %s' % path)
        return None

    def get_app_context_for_namespace(self, namespace):
        """Returns the context registered for namespace, or None."""
        return self._namespace2app_context.get(namespace)

    def get_course_for_path(self, path):
        """Returns the context whose slug matches path, or None."""
        if CourseIndex.CAN_USE_INDEXED_GETTER:
            return self._get_course_for_path_via_index(path)
        else:
            return self._get_course_for_path_linear(path)
def debug(message):
    """Logs message at INFO level when ApplicationContext.DEBUG_INFO is on."""
    if not ApplicationContext.DEBUG_INFO:
        return
    logging.info(message)
class ApplicationContext(object):
    """An application context for a request/response.

    A context binds together a course's URL slug, its home folder, its
    datastore namespace and its (virtual) file system. Instances are
    shared across requests; per-thread state (the current locale) lives
    in a threading.local.
    """

    # if True we auto-deploy filesystem-based default course
    AUTO_DEPLOY_DEFAULT_COURSE = False

    # enabled debug info output
    DEBUG_INFO = False

    # Here we store a map of a text definition of the courses to be parsed, and
    # a corresponding CourseIndex.
    _COURSE_INDEX_CACHE = {}

    @classmethod
    def get_namespace_name_for_request(cls):
        """Gets the name of the namespace to use for this request.

        (Examples of such namespaces are NDB and memcache.)

        Returns:
            The namespace for the current request, or None if no course matches
            the current request context path.
        """
        course = get_course_for_current_request()
        if course:
            return course.namespace
        return appengine_config.DEFAULT_NAMESPACE_NAME

    @classmethod
    def after_create(cls, instance):
        """Override this method to manipulate freshly created instance."""
        pass

    def __init__(self, site_type, slug, homefolder, namespace, fs, raw=None):
        """Creates new application context.

        Args:
            site_type: Specifies the type of context. Must be 'course' for now.
            slug: A common context path prefix for all URLs in the context.
            homefolder: A folder with the assets belonging to this context.
            namespace: A name of a datastore namespace for use by this context.
            fs: A file system object to be used for accessing homefolder.
            raw: A raw representation of this course rule (course:/:/).

        Returns:
            The new instance of namespace object.
        """
        self.type = site_type
        self.slug = slug
        self.homefolder = homefolder
        self.namespace = namespace
        self._fs = fs
        self._raw = raw
        self._cached_environ = None
        # Locale is per-thread: this instance is shared across requests.
        self._locale_threadlocal = threading.local()
        self.clear_per_request_cache()
        self.after_create(self)

    def __ne__(self, other):
        # Defined in terms of __eq__ (Python 2 does not derive this).
        return not self.__eq__(other)

    def __eq__(self, other):
        """Two ApplicationContexts are the same if: same slug and namespace."""
        if not isinstance(other, ApplicationContext):
            return False
        app_context_1 = self
        app_context_2 = other
        if app_context_1 is app_context_2:
            return True
        if app_context_1 and app_context_2:
            same_ns = (
                app_context_1.get_namespace_name() ==
                app_context_2.get_namespace_name())
            same_slug = app_context_1.get_slug() == app_context_2.get_slug()
            return same_ns and same_slug
        return False

    @classmethod
    def clear_per_process_cache(cls):
        """Clears all objects from global in-process cache."""
        cls._COURSE_INDEX_CACHE = {}
        caching.ProcessScopedSingleton.clear_all()

    def clear_per_request_cache(self):
        """Clears all objects cached per request."""
        self._cached_environ = None
        caching.RequestScopedSingleton.clear_all()

    @ property
    def raw(self):
        # The original 'course:<slug>:<folder>[:<ns>]' rule text, or None.
        return self._raw

    @ property
    def fs(self):
        # The file system serving this context's assets.
        return self._fs

    @property
    def now_available(self):
        # Whether the course is published; falsy when settings are missing.
        course = self.get_environ().get('course')
        return course and course.get('now_available')

    @property
    def whitelist(self):
        # Whitelist setting from course settings; '' when unset.
        course = self.get_environ().get('course')
        return '' if not course else course.get('whitelist', '')

    def set_current_locale(self, locale):
        """Sets this thread's locale; drops per-request caches on change."""
        old_locale = self.get_current_locale()
        if locale != old_locale:
            self._locale_threadlocal.locale = locale
            self.clear_per_request_cache()

    def get_current_locale(self):
        """Returns this thread's locale, or None if never set."""
        # we cache instances of this object between requests; it's possible
        # that new thread reuses the object and has no threadlocal initialized
        if not hasattr(self._locale_threadlocal, 'locale'):
            self._locale_threadlocal.locale = None
        return self._locale_threadlocal.locale

    @property
    def default_locale(self):
        # The locale from course settings, or None when settings are missing.
        course_settings = self.get_environ().get('course')
        if not course_settings:
            return None
        return course_settings.get('locale')

    def get_title(self):
        """Returns the course title, or 'UNTITLED' when not configured."""
        try:
            return self.get_environ()['course']['title']
        except KeyError:
            return 'UNTITLED'

    def get_namespace_name(self):
        """Returns the datastore namespace name for this context."""
        return self.namespace

    def get_home_folder(self):
        """Returns the logical folder holding this course's files."""
        return self.homefolder

    def get_slug(self):
        """Returns the URL prefix for this course."""
        return self.slug

    def get_config_filename(self):
        """Returns absolute location of a course configuration file."""
        filename = abspath(self.get_home_folder(), GCB_CONFIG_FILENAME)
        debug('Config file: %s' % filename)
        return filename

    def get_environ(self):
        """Returns the course settings (delegates to Course.get_environ)."""
        return Course.get_environ(self)

    def get_home(self):
        """Returns absolute location of a course folder."""
        path = abspath(self.get_home_folder(), '')
        return path

    def get_template_home(self):
        """Returns absolute location of a course template folder."""
        path = abspath(self.get_home_folder(), GCB_VIEWS_FOLDER_NAME)
        return path

    def get_data_home(self):
        """Returns absolute location of a course data folder."""
        path = abspath(self.get_home_folder(), GCB_DATA_FOLDER_NAME)
        return path

    def gettext(self, text):
        """Render localized text in the default locale.

        This method should be used in place of gettext.gettext, as it will
        set the locale correctly.

        Args:
            text: str. The text to be localized.

        Returns:
            Localized text, or the original string, if no localization exists.
        """
        try:
            translator = i18n.get_i18n()
            translator.set_locale(self.get_current_locale())
            return translator.gettext(text)
        except Exception:  # pylint: disable=broad-except
            # Translation failure must never break page rendering.
            logging.exception('Unable to translate %s', text)
            return text

    def get_template_environ(self, locale, additional_dirs):
        """Create and configure jinja template evaluation environment."""
        template_dir = self.get_template_home()
        dirs = [template_dir]
        if additional_dirs:
            dirs += additional_dirs
        jinja_environment = self.fs.get_jinja_environ(dirs)
        i18n.get_i18n().set_locale(locale)
        jinja_environment.install_gettext_translations(i18n)
        return jinja_environment

    def is_editable_fs(self):
        """True when assets live in the datastore (editable online)."""
        return self._fs.impl.__class__ == DatastoreBackedFileSystem

    def can_pick_all_locales(self):
        """Whether the current user may pick locales marked unavailable."""
        return courses_module.can_pick_all_locales(self)

    def get_allowed_locales(self):
        """Returns the default locale plus selectable extra locales."""
        environ = self.get_environ()
        default_locale = environ['course'].get('locale')
        extra_locales = environ.get('extra_locales', [])
        return [default_locale] + [
            loc['locale'] for loc in extra_locales
            if loc['locale'] != default_locale and (
                loc[Course.SCHEMA_LOCALE_AVAILABILITY] == (
                    Course.SCHEMA_LOCALE_AVAILABILITY_AVAILABLE)
                or self.can_pick_all_locales())]

    def get_all_locales(self):
        """Returns _all_ locales, whether enabled or not. Dashboard only."""
        environ = self.get_environ()
        default_locale = self.default_locale
        extra_locales = environ.get('extra_locales', [])
        return [default_locale] + [loc['locale'] for loc in extra_locales]

    @classmethod
    def is_absolute_url(cls, url):
        """True when url carries a scheme (e.g. 'http:')."""
        return bool(urlparse.urlparse(url).scheme)

    def canonicalize_url(self, location):
        """Adds the current namespace URL prefix to the relative 'location'."""
        is_relative = (
            not self.is_absolute_url(location) and
            not location.startswith(self.get_slug()))
        has_slug = (
            self.get_slug() and self.get_slug() != '/')
        if is_relative and has_slug:
            location = '%s%s' % (self.get_slug(), location)
        return location
def has_path_info():
    """Checks if PATH_INFO is defined for the thread local."""
    try:
        getattr(PATH_INFO_THREAD_LOCAL, 'path')
        return True
    except AttributeError:
        return False
def set_path_info(path):
    """Stores PATH_INFO in thread local.

    Also switches the App Engine namespace to the one of the course
    matching this path. Must be balanced by unset_path_info().

    Args:
        path: str. The request PATH_INFO; must be non-empty.

    Raises:
        Exception: if path is falsy, or a path is already set on this thread.
    """
    if not path:
        raise Exception('Use \'unset()\' instead.')
    if has_path_info():
        raise Exception('Expected no path set.')
    try:
        PATH_INFO_THREAD_LOCAL.path = path
        # Remember the caller's namespace so unset_path_info() can restore
        # it, then switch to the namespace of the matching course.
        PATH_INFO_THREAD_LOCAL.old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(
            ApplicationContext.get_namespace_name_for_request())
    finally:
        # Nested finally blocks guarantee both caches are cleared even
        # if one of the clear operations raises.
        try:
            caching.RequestScopedSingleton.clear_all()
        finally:
            models.MemcacheManager.clear_readonly_cache()
def get_path_info():
    """Gets PATH_INFO from thread local.

    Raises AttributeError when no path was stored via set_path_info().
    """
    return PATH_INFO_THREAD_LOCAL.path
def unset_path_info():
    """Removes PATH_INFO from thread local.

    Reverses set_path_info(): clears caches, drops the per-request course
    cache, and restores the caller's namespace.

    Raises:
        Exception: if no path is currently set on this thread.
    """
    if not has_path_info():
        raise Exception('Expected valid path already set.')
    # Nested finally blocks guarantee every cleanup step runs — most
    # importantly the namespace restore and thread-local deletion at the
    # innermost level — even if an earlier step raises.
    try:
        models.MemcacheManager.clear_readonly_cache()
    finally:
        try:
            caching.RequestScopedSingleton.clear_all()
        finally:
            try:
                app_context = get_course_for_current_request()
                if app_context:
                    app_context.clear_per_request_cache()
            finally:
                namespace_manager.set_namespace(
                    PATH_INFO_THREAD_LOCAL.old_namespace)
                del PATH_INFO_THREAD_LOCAL.old_namespace
                del PATH_INFO_THREAD_LOCAL.path
def _build_course_list_from(rules_text, create_vfs=True):
    """Compute the list of contexts from the text rules.

    Args:
        rules_text: str. Course rules, newline- or comma-separated, each of
            the form 'course:<slug>:<folder>[:<namespace>]'. Blank lines and
            lines starting with '#' are skipped.
        create_vfs: bool. When True a virtual file system is instantiated
            for each entry; when False (validation-only) vfs is None.

    Returns:
        A list of ApplicationContext, one per rule, in declaration order.

    Raises:
        Exception: on a malformed rule, duplicate slug or namespace, or an
            invalid ordering of entries.
    """
    if not rules_text:
        return []
    rules_text = rules_text.replace(',', '\n')
    rules = rules_text.split('\n')
    slugs = {}
    namespaces = {}
    all_contexts = []
    for rule in rules:
        rule = rule.strip()
        if not rule or rule.startswith('#'):
            continue
        parts = rule.split(':')

        # validate length
        if len(parts) < 3:
            raise Exception('Expected rule definition of the form '
                            ' \'type:slug:folder[:ns]\', got %s: ' % rule)

        # validate type
        if parts[0] != SITE_TYPE_COURSE:
            raise Exception('Expected \'%s\', found: \'%s\'.'
                            % (SITE_TYPE_COURSE, parts[0]))
        site_type = parts[0]

        # validate slug: must be a plain path fragment with no scheme/query
        slug = parts[1]
        slug_parts = urlparse.urlparse(slug)
        if slug != slug_parts[2]:
            raise Exception(
                'Bad rule: \'%s\'. '
                'Course URL prefix \'%s\' must be a simple URL fragment.' % (
                    rule, slug))
        if slug in slugs:
            raise Exception(
                'Bad rule: \'%s\'. '
                'Course URL prefix \'%s\' is already defined.' % (rule, slug))
        slugs[slug] = True

        # validate folder name. Bind 'folder' into the lambda via a default
        # argument: lambdas close over variables late, so without the
        # binding any factory invoked after this iteration would see a
        # later iteration's folder value.
        if parts[2]:
            folder = parts[2]
            create_fs = lambda unused_ns, folder=folder: (
                LocalReadOnlyFileSystem(logical_home_folder=folder))
        else:
            folder = '/'
            create_fs = lambda ns: DatastoreBackedFileSystem(
                ns=ns,
                logical_home_folder=appengine_config.BUNDLE_ROOT,
                inherits_from=LocalReadOnlyFileSystem(logical_home_folder='/'),
                inheritable_folders=GCB_INHERITABLE_FOLDER_NAMES)

        # validate or derive namespace; when not given explicitly, it is
        # derived from the folder name with '/' mapped to '-'
        namespace = appengine_config.DEFAULT_NAMESPACE_NAME
        if len(parts) == 4:
            namespace = parts[3]
        else:
            if folder and folder != '/':
                namespace = '%s%s' % (GCB_BASE_COURSE_NAMESPACE,
                                      folder.replace('/', '-'))
        try:
            namespace_manager.validate_namespace(namespace)
        except Exception as e:
            raise Exception(
                'Error validating namespace "%s" in rule "%s"; %s.' % (
                    namespace, rule, e))
        if namespace in namespaces:
            raise Exception(
                'Bad rule \'%s\'. '
                'Namespace \'%s\' is already defined.' % (rule, namespace))
        namespaces[namespace] = True

        vfs = None
        if create_vfs:
            vfs = AbstractFileSystem(create_fs(namespace))
        all_contexts.append(ApplicationContext(
            site_type, slug, folder, namespace, vfs, raw=rule))
    # Final ordering check: a slug must not shadow a later, longer slug.
    _validate_appcontext_list(all_contexts)
    return all_contexts
def get_course_index(rules_text=None):
    """Build course index given a text of course definition rules."""
    rules_text = rules_text or GCB_COURSES_CONFIG.value
    # With auto-deploy off, an untouched default config maps no courses.
    is_untouched_default = (
        rules_text == GCB_COURSES_CONFIG.default_value and
        not Registry.get_overrides().get(GCB_COURSES_CONFIG.name))
    if is_untouched_default and (
            not ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE):
        return CourseIndex([])
    rules_text = rules_text.replace(',', '\n')
    # The cache holds at most one entry keyed by the normalized rules text.
    # pylint: disable=protected-access
    cached = ApplicationContext._COURSE_INDEX_CACHE.get(rules_text)
    if cached:
        return cached
    index = CourseIndex(_build_course_list_from(rules_text))
    # pylint: disable=protected-access
    ApplicationContext._COURSE_INDEX_CACHE = {rules_text: index}
    return index
def get_app_context_for_namespace(namespace):
    """Chooses the app_context that matches a namespace."""
    app_context = get_course_index().get_app_context_for_namespace(namespace)
    if app_context is None:
        debug('No app_context in namespace: %s' % namespace)
    return app_context
def get_course_for_path(path):
    """Chooses app_context that matches a context path."""
    course_index = get_course_index()
    return course_index.get_course_for_path(path)
def get_course_for_current_request():
    """Chooses app_context that matches current request context path."""
    if has_path_info():
        return get_course_for_path(get_path_info())
    # No PATH_INFO recorded for this thread: no course can match.
    return None
def get_all_courses(rules_text=None):
    """Returns every application context defined by the given rules."""
    return get_course_index(rules_text).get_all_courses()
def _courses_config_validator(rules_text, errors, expect_failures=True):
    """Validates a textual definition of courses entries."""
    try:
        contexts = _build_course_list_from(rules_text, create_vfs=False)
        _validate_appcontext_list(contexts)
        return True
    except Exception as e:  # pylint: disable=broad-except
        # Only log when failures are unexpected; callers probing candidate
        # configurations just collect the message.
        if not expect_failures:
            logging.error('%s\n%s', e, traceback.format_exc())
        errors.append(str(e))
        return False
def validate_new_course_entry_attributes(name, title, admin_email, errors):
    """Validates new course attributes, appending messages to errors."""
    if not (name and len(name) >= 3):
        errors.append(
            'The unique name associated with the course must be at least '
            'three characters long.')
    if not re.match('[_a-z0-9]+$', name, re.IGNORECASE):
        errors.append(
            'The unique name associated with the course should contain only '
            'lowercase letters, numbers, or underscores.')
    if not (title and len(title) >= 3):
        errors.append('The course title is too short.')
    if not (admin_email and '@' in admin_email):
        errors.append('Please enter a valid email address.')
@db.transactional()
def _add_new_course_entry_to_persistent_configuration(raw):
    """Adds new raw course entry definition to the datastore settings.

    This loads all current datastore course entries and adds a new one. It
    also finds the best place to add the new entry: the further down the
    list the better, because entries are applied in the order of
    declaration.

    Args:
        raw: The course entry rule: 'course:/foo::ns_foo'.

    Returns:
        True if added, False if not. False almost always means a duplicate rule.
    """
    # Get all current entries from a datastore.
    entity = ConfigPropertyEntity.get_by_key_name(GCB_COURSES_CONFIG.name)
    if not entity:
        entity = ConfigPropertyEntity(key_name=GCB_COURSES_CONFIG.name)
        entity.is_draft = False
    if not entity.value:
        entity.value = GCB_COURSES_CONFIG.value
        # A pristine default config is treated as empty so the new rule
        # does not collide with the default 'course:/:/:' entry.
        if entity.value == GCB_COURSES_CONFIG.default_value:
            entity.value = ''
    lines = entity.value.splitlines()

    # Add new entry to the rest of the entries. Since entries are matched
    # in the order of declaration, try to find insertion point further down.
    final_lines_text = None
    for index in reversed(range(0, len(lines) + 1)):
        # Create new rule list putting new item at index position.
        new_lines = lines[:]
        new_lines.insert(index, raw)
        new_lines_text = '\n'.join(new_lines)

        # Validate the rule list definition.
        if _courses_config_validator(new_lines_text, [], expect_failures=True):
            final_lines_text = new_lines_text
            break

    # Save updated course entries.
    if final_lines_text:
        entity.value = final_lines_text
        entity.put()
        return True
    return False
def add_new_course_entry(unique_name, title, admin_email, errors):
    """Validates course attributes and adds the course."""
    # Reject the request outright when the attributes are malformed.
    validate_new_course_entry_attributes(
        unique_name, title, admin_email, errors)
    if errors:
        return None
    # Build the raw rule and confirm it parses into a valid course list.
    raw = 'course:/%s::ns_%s' % (unique_name, unique_name)
    try:
        get_all_courses(rules_text=raw)
    except Exception as e:  # pylint: disable=broad-except
        errors.append('Failed to add entry: %s.\n%s' % (raw, e))
    if errors:
        return None
    # Persist; a False result means a duplicate entry already exists.
    if _add_new_course_entry_to_persistent_configuration(raw):
        return raw
    errors.append(
        'Unable to add new entry \'%s\'. Entry with the '
        'same name \'%s\' already exists.' % (raw, unique_name))
    return None
# Config property holding the course definition rules; its admin-facing help
# text is built with safe_dom and validated by _courses_config_validator.
# Fixed a typo in the help text: 'http:/www.example.com' was missing a slash.
GCB_COURSES_CONFIG = ConfigProperty(
    'gcb_courses_config', str,
    safe_dom.NodeList().append(
        safe_dom.Element('p').add_text("""
A newline separated list of course entries. Each course entry has
four parts, separated by colons (':'). The four parts are:""")
    ).append(
        safe_dom.Element('ol').add_child(
            safe_dom.Element('li').add_text(
                'The word \'course\', which is a required element.')
        ).add_child(
            safe_dom.Element('li').add_text("""
A unique course URL prefix. Examples could be '/cs101' or '/art'.
Default: '/'""")
        ).add_child(
            safe_dom.Element('li').add_text("""
A file system location of course asset files. If location is left empty,
the course assets are stored in a datastore instead of the file system. A course
with assets in a datastore can be edited online. A course with assets on file
system must be re-deployed to Google App Engine manually.""")
        ).add_child(
            safe_dom.Element('li').add_text("""
A course datastore namespace where course data is stored in App Engine.
Note: this value cannot be changed after the course is created."""))
    ).append(
        safe_dom.Text(
            'For example, consider the following two course entries:')
    ).append(safe_dom.Element('br')).append(
        safe_dom.Element('blockquote').add_text(
            'course:/cs101::ns_cs101'
        ).add_child(
            safe_dom.Element('br')
        ).add_text('course:/:/')
    ).append(
        safe_dom.Element('p').add_text("""
Assuming you are hosting Course Builder on http://www.example.com, the first
entry defines a course on a http://www.example.com/cs101 and both its assets
and student data are stored in the datastore namespace 'ns_cs101'. The second
entry defines a course hosted on http://www.example.com/, with its assets
stored in the '/' folder of the installation and its data stored in the default
empty datastore namespace.""")
    ).append(
        safe_dom.Element('p').add_text("""
A line that starts with '#' is ignored. Course entries are applied in the
order they are defined.""")
    ), 'course:/:/:', multiline=True, validator=_courses_config_validator)
class ApplicationRequestHandler(webapp2.RequestHandler):
    """Handles dispatching of all URL's to proper handlers."""
    # WARNING! never set this value to True, unless for the production load
    # tests; setting this value to True will allow any anonymous third party to
    # act as a Course Builder superuser
    CAN_IMPERSONATE = False
    # the name of the impersonation header
    IMPERSONATE_HEADER_NAME = 'Gcb-Impersonate'
    def dispatch(self):
        """Routes the request, via the impersonation path when enabled."""
        if self.CAN_IMPERSONATE:
            self.impersonate_and_dispatch()
        else:
            super(ApplicationRequestHandler, self).dispatch()
    def impersonate_and_dispatch(self):
        """Dispatches request with user impersonation.

        The impersonation header, when present, carries a JSON payload with
        'email' and 'user_id' keys naming the identity to assume for the
        duration of this single request.
        """
        impersonate_info = self.request.headers.get(
            self.IMPERSONATE_HEADER_NAME)
        if not impersonate_info:
            # No impersonation requested; handle the request normally.
            super(ApplicationRequestHandler, self).dispatch()
            return
        impersonate_info = transforms.loads(impersonate_info)
        email = impersonate_info.get('email')
        user_id = impersonate_info.get('user_id')
        def get_impersonated_user():
            """A method that returns impersonated user."""
            try:
                return users.User(email=email, _user_id=user_id)
            except users.UserNotFoundError:
                return None
        old_get_current_user = users.get_current_user
        try:
            logging.info('Impersonating %s.', email)
            # Monkey-patch the users module so all downstream code sees the
            # impersonated identity; restored unconditionally in the finally.
            users.get_current_user = get_impersonated_user
            super(ApplicationRequestHandler, self).dispatch()
            return
        finally:
            users.get_current_user = old_get_current_user
    @classmethod
    def bind_to(cls, urls, urls_map):
        """Recursively builds a map from a list of (URL, Handler) tuples."""
        for url in urls:
            path_prefix = url[0]
            handler = url[1]
            urls_map[path_prefix] = handler
            # add child handlers
            if hasattr(handler, 'get_child_routes'):
                cls.bind_to(handler.get_child_routes(), urls_map)
    @classmethod
    def bind(cls, urls):
        """Replaces the class-wide URL map with one built from urls."""
        urls_map = {}
        cls.bind_to(urls, urls_map)
        cls.urls_map = urls_map
    def get_handler(self):
        """Finds a course suitable for handling this request."""
        course = get_course_for_current_request()
        if not course:
            return None
        path = get_path_info()
        if not path:
            return None
        # Strip the course slug so handlers see course-relative paths.
        return self.get_handler_for_course_type(
            course, unprefix(path, course.get_slug()))
    def can_handle_course_requests(self, context):
        """Reject all but author requests to an unpublished course."""
        return ((context.now_available and Roles.is_user_whitelisted(context))
                or Roles.is_course_admin(context))
    def _get_handler_factory_for_path(self, path):
        """Picks a handler to handle the path."""
        # Checks if path maps in its entirety.
        if path in ApplicationRequestHandler.urls_map:
            return ApplicationRequestHandler.urls_map[path]
        # Check if partial path maps. For now, let only zipserve.ZipHandler
        # handle partial matches. We want to find the longest possible match.
        parts = path.split('/')
        candidate = None
        partial_path = ''
        for part in parts:
            if part:
                partial_path += '/' + part
                if partial_path in ApplicationRequestHandler.urls_map:
                    handler = ApplicationRequestHandler.urls_map[partial_path]
                    if (
                            isinstance(handler, zipserve.ZipHandler) or
                            issubclass(handler, zipserve.ZipHandler)):
                        # Keep overwriting: the last (longest) match wins.
                        candidate = handler
        return candidate
    def get_handler_for_course_type(self, context, path):
        """Gets the right handler for the given context and path.

        Args:
            context: ApplicationContext. The course owning this request.
            path: str. The course-relative request path.

        Returns:
            A configured handler instance, or None when nothing matches
            or the user may not see this course.
        """
        if not self.can_handle_course_requests(context):
            return None
        norm_path = os.path.normpath(path)
        # Handle static assets here.
        if norm_path.startswith(GCB_ASSETS_FOLDER_NAME):
            abs_file = abspath(context.get_home_folder(), norm_path)
            handler = AssetHandler(self, abs_file)
            handler.request = self.request
            handler.response = self.response
            handler.app_context = context
            STATIC_HANDLER_COUNT.inc()
            return handler
        # Handle all dynamic handlers here.
        handler_factory = self._get_handler_factory_for_path(path)
        if handler_factory:
            handler = handler_factory()
            handler.app_context = context
            handler.request = self.request
            handler.response = self.response
            # This variable represents the path after the namespace prefix is
            # removed. The full path is still stored in self.request.path. For
            # example, if self.request.path is '/new_course/foo/bar/baz/...',
            # the path_translated would be '/foo/bar/baz/...'.
            handler.path_translated = path
            debug('Handler: %s > %s' % (path, handler.__class__.__name__))
            DYNAMIC_HANDLER_COUNT.inc()
            return handler
        NO_HANDLER_COUNT.inc()
        return None
    def before_method(self, handler, verb, path):
        """Runs the handler's pre-dispatch hook, if it defines one."""
        if hasattr(handler, 'before_method'):
            handler.before_method(verb, path)
    def after_method(self, handler, verb, path):
        """Runs the handler's post-dispatch hook, if it defines one."""
        if hasattr(handler, 'after_method'):
            handler.after_method(verb, path)
    @appengine_config.timeandlog('invoke_http_verb')
    def invoke_http_verb(self, verb, path, no_handler):
        """Sets up the environment and invokes HTTP verb on the handler.

        Args:
            verb: str. HTTP verb name, e.g. 'GET'.
            path: str. The request path.
            no_handler: callable taking the path; invoked when no handler
                matches the request.
        """
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                no_handler(path)
            else:
                set_default_response_headers(handler)
                self.before_method(handler, verb, path)
                try:
                    getattr(handler, verb.lower())()
                finally:
                    # after_method runs even when the verb method raises.
                    self.after_method(handler, verb, path)
        finally:
            count_stats(self)
            unset_path_info()
    def _error_404(self, path):
        """Fail with 404."""
        self.error(404)
    def _login_or_404(self, path):
        """If no user, offer login page, otherwise fail 404."""
        if not users.get_current_user():
            self.redirect(users.create_login_url(path))
        else:
            self.error(404)
    def get(self, path):
        self.invoke_http_verb('GET', path, self._login_or_404)
    def post(self, path):
        self.invoke_http_verb('POST', path, self._error_404)
    def put(self, path):
        self.invoke_http_verb('PUT', path, self._error_404)
    def delete(self, path):
        self.invoke_http_verb('DELETE', path, self._error_404)
def assert_mapped(src, dest):
    """Asserts that path src resolves to a course with slug dest (or None)."""
    try:
        set_path_info(src)
        course = get_course_for_current_request()
        if dest:
            assert course.get_slug() == dest
        else:
            assert course is None
    finally:
        unset_path_info()
def assert_handled(src, target_handler):
    """Asserts that path src dispatches to an instance of target_handler.

    Returns the handler instance, or None when None was expected.
    """
    try:
        set_path_info(src)
        dispatcher = ApplicationRequestHandler()
        # For unit tests we want every request handled regardless of the
        # course.now_available flag, so patch the publication check out.
        dispatcher.can_handle_course_requests = lambda context: True
        found = dispatcher.get_handler()
        if target_handler is None and found is None:
            return None
        assert isinstance(found, target_handler)
        return found
    finally:
        unset_path_info()
def assert_fails(func):
    """Asserts that calling func() raises an exception."""
    try:
        func()
    except Exception:  # pylint: disable=W0703
        # The expected failure happened; nothing more to check.
        return
    raise Exception('Function \'%s\' was expected to fail.' % func)
def setup_courses(course_config):
    """Helper method that allows a test to setup courses on the fly.

    Args:
        course_config: str. Course rules text, e.g. 'course:/a:/nsa'.
    """
    Registry.test_overrides[GCB_COURSES_CONFIG.name] = course_config
def reset_courses():
    """Cleanup method to complement setup_courses()."""
    # Restore the config property to its default so later tests start clean.
    Registry.test_overrides[
        GCB_COURSES_CONFIG.name] = GCB_COURSES_CONFIG.default_value
def test_unprefix():
    """Checks that unprefix() strips a slug prefix from a path."""
    cases = [
        ('/', '/', '/'),
        ('/a/b/c', '/a/b', '/c'),
        ('/a/b/index.html', '/a/b', '/index.html'),
        ('/a/b', '/a/b', '/'),
    ]
    for path, prefix, expected in cases:
        assert unprefix(path, prefix) == expected
def test_rule_validations():
    """Test rules validator."""
    courses = get_all_courses(rules_text='course:/:/')
    assert 1 == len(courses)
    # Check comments: a '#' comments out the rest of the rules text.
    setup_courses('course:/a:/nsa, course:/b:/nsb')
    assert 2 == len(get_all_courses())
    setup_courses('course:/a:/nsa, # course:/a:/nsb')
    assert 1 == len(get_all_courses())
    # Check slug collisions are not allowed.
    setup_courses('course:/a:/nsa, course:/a:/nsb')
    assert_fails(get_all_courses)
    # Check namespace collisions are not allowed.
    setup_courses('course:/a:/nsx, course:/b:/nsx')
    assert_fails(get_all_courses)
    # Check rule order is enforced. If we allowed any order and '/a' was before
    # '/aa', the '/aa' would never match.
    setup_courses('course:/a:/nsa, course:/aa:/nsaa, course:/aaa:/nsaaa')
    assert_fails(get_all_courses)
    # Check namespace names.
    setup_courses('course:/a::/nsx')
    assert_fails(get_all_courses)
    # Check slug validity: a space in the slug is tolerated...
    setup_courses('course:/a /b::nsa')
    get_all_courses()
    # ...but a '?' in the slug is rejected.
    setup_courses('course:/a?/b::nsa')
    assert_fails(get_all_courses)
    # Cleanup.
    reset_courses()
def test_rule_definitions():
    """Test various rewrite rule definitions."""
    # Check that the default site is created when no rules are specified.
    assert len(get_all_courses()) == 1
    # Test one rule parsing.
    setup_courses('course:/google/pswg:/sites/pswg')
    rules = get_all_courses()
    assert len(get_all_courses()) == 1
    rule = rules[0]
    assert rule.get_slug() == '/google/pswg'
    assert rule.get_home_folder() == '/sites/pswg'
    # Test two rule parsing.
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')
    assert len(get_all_courses()) == 2
    # Test that two of the same slugs are not allowed.
    setup_courses('foo:/a/b:/c/d, bar:/a/b:/c/d')
    assert_fails(get_all_courses)
    # Test that only 'course' is supported.
    setup_courses('foo:/a/b:/c/d, bar:/e/f:/g/h')
    assert_fails(get_all_courses)
    # Cleanup.
    reset_courses()
    # Test namespaces: the name is derived from the home folder '/c/d'.
    set_path_info('/')
    try:
        setup_courses('course:/:/c/d')
        assert ApplicationContext.get_namespace_name_for_request() == (
            'gcb-course-c-d')
    finally:
        unset_path_info()
    # Cleanup.
    reset_courses()
def test_url_to_rule_mapping():
    """Tests mapping of a URL to a rule."""
    # default mapping
    assert_mapped('/favicon.ico', '/')
    assert_mapped('/assets/img/foo.png', '/')
    # explicit mapping
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')
    assert_mapped('/a/b', '/a/b')
    assert_mapped('/a/b/', '/a/b')
    assert_mapped('/a/b/c', '/a/b')
    assert_mapped('/a/b/c', '/a/b')
    assert_mapped('/e/f', '/e/f')
    assert_mapped('/e/f/assets', '/e/f')
    assert_mapped('/e/f/views', '/e/f')
    # Paths that lack a leading '/' or match no slug resolve to no course.
    assert_mapped('e/f', None)
    assert_mapped('foo', None)
    # Cleanup.
    reset_courses()
def build_index_for_rules_text(rules_text):
    """Installs rules_text as the course config; returns (courses, index)."""
    Registry.test_overrides[GCB_COURSES_CONFIG.name] = rules_text
    return get_all_courses(), get_course_index()
def test_get_course_for_path_impl():
    """Checks course lookup by path against a variety of rule sets."""
    # pylint: disable=protected-access
    # A single course at the root slug matches every path.
    courses, index = build_index_for_rules_text('course:/::ns_x')
    expected = {None: courses[0]}
    assert expected == index._slug_parts2app_context
    for path in ['', '/course', '/a/b']:
        assert courses[0] == get_course_for_path(path)
    # A course at /a matches only paths at or below /a.
    courses, index = build_index_for_rules_text('course:/a::ns_x')
    expected = {'a': {None: courses[0]}}
    assert expected == index._slug_parts2app_context
    for path in ['/a', '/a/course', '/a/b/c']:
        assert courses[0] == get_course_for_path(path)
    for path in ['', '/', '/course']:
        assert not get_course_for_path(path)
    # Two sibling courses resolve independently.
    courses, index = build_index_for_rules_text(
        'course:/a::ns_x\ncourse:/b::ns_y')
    expected = {'a': {None: courses[0]}, 'b': {None: courses[1]}}
    assert expected == index._slug_parts2app_context
    for path in ['/a', '/a/course', '/a/b/c']:
        assert courses[0] == get_course_for_path(path)
    for path in ['/b', '/b/course', '/b/a/c']:
        assert courses[1] == get_course_for_path(path)
    for path in ['', '/', '/course']:
        assert not get_course_for_path(path)
    # A two-segment slug matches only below both segments.
    courses, index = build_index_for_rules_text('course:/a/b::ns_x')
    expected = {'a': {'b': {None: courses[0]}}}
    assert expected == index._slug_parts2app_context
    for path in ['/a/b', '/a/b/course', '/a/b/c']:
        assert courses[0] == get_course_for_path(path)
    for path in ['', '/a', '/a/course', '/a/c']:
        assert not get_course_for_path(path)
    # Two disjoint two-segment slugs.
    courses, index = build_index_for_rules_text(
        'course:/a/c::ns_x\ncourse:/b/d::ns_y')
    expected = {'a': {'c': {None: courses[0]}}, 'b': {'d': {None: courses[1]}}}
    assert expected == index._slug_parts2app_context
    for path in ['/a/c', '/a/c/course', '/a/c/d']:
        assert courses[0] == get_course_for_path(path)
    for path in ['/b/d', '/b/d/course', '/b/d/c']:
        assert courses[1] == get_course_for_path(path)
    for path in ['', '/', '/course', '/a', '/b']:
        assert not get_course_for_path(path)
    # Mis-ordered entries ('/a' listed before the more specific '/a/b') must
    # be rejected. Bug fix: the original check passed silently when no
    # exception was raised at all; we now assert that building the index
    # actually fails.
    raised = False
    try:
        courses, index = build_index_for_rules_text(
            'course:/a::ns_x\ncourse:/a/b::ns_y')
    except Exception as e:  # pylint: disable=broad-except
        raised = True
        assert 'reorder course entries' in e.message
    assert raised
    # With '/a/b' listed first, both slugs coexist; the longest match wins.
    courses, index = build_index_for_rules_text(
        'course:/a/b::ns_x\ncourse:/a::ns_y')
    expected = {'a': {'b': {None: courses[0]}, None: courses[1]}}
    assert expected == index._slug_parts2app_context
    for path in ['/a/b', '/a/b/c', '/a/b/c/course', '/a/b/c/d']:
        assert courses[0] == get_course_for_path(path)
    for path in ['/a', '/a/c', '/a/course', '/a/c/d']:
        assert courses[1] == get_course_for_path(path)
    for path in ['/', '/course', '/b']:
        assert not get_course_for_path(path)
    # pylint: enable=protected-access
def test_get_course_for_path():
    """Tests linear and indexed search to make sure both work the same way."""
    # Run the full lookup suite once per getter implementation.
    for use_index in (False, True):
        CourseIndex.CAN_USE_INDEXED_GETTER = use_index
        test_get_course_for_path_impl()
def test_url_to_handler_mapping_for_course_type():
    """Tests mapping of a URL to a handler for course type."""
    # setup rules
    setup_courses('course:/a/b:/c/d, course:/e/f:/g/h')
    # setup helper classes; only ZipHandler subclasses may match a partial
    # path, so handlers 2-4 derive from zipserve.ZipHandler
    class FakeHandler0(object):
        def __init__(self):
            self.app_context = None
    class FakeHandler1(object):
        def __init__(self):
            self.app_context = None
    class FakeHandler2(zipserve.ZipHandler):
        def __init__(self):
            super(FakeHandler2, self).__init__()
            self.app_context = None
    class FakeHandler3(zipserve.ZipHandler):
        def __init__(self):
            super(FakeHandler3, self).__init__()
            self.app_context = None
    class FakeHandler4(zipserve.ZipHandler):
        def __init__(self):
            super(FakeHandler4, self).__init__()
            self.app_context = None
    # Setup handler.
    handler0 = FakeHandler0
    handler1 = FakeHandler1
    handler2 = FakeHandler2
    urls = [('/', handler0), ('/foo', handler1), ('/bar', handler2)]
    ApplicationRequestHandler.bind(urls)
    # Test proper handler mappings.
    assert_handled('/a/b', FakeHandler0)
    assert_handled('/a/b/', FakeHandler0)
    assert_handled('/a/b/foo', FakeHandler1)
    assert_handled('/a/b/bar', FakeHandler2)
    # Test partial path match: only the ZipHandler subclass matches a prefix.
    assert_handled('/a/b/foo/bee', None)
    assert_handled('/a/b/bar/bee', FakeHandler2)
    # Test assets mapping.
    handler = assert_handled('/a/b/assets/img/foo.png', AssetHandler)
    assert AbstractFileSystem.normpath(
        handler.app_context.get_template_home()).endswith(
            AbstractFileSystem.normpath('/c/d/views'))
    # This is allowed as we don't go out of /assets/...
    handler = assert_handled(
        '/a/b/assets/foo/../models/models.py', AssetHandler)
    assert AbstractFileSystem.normpath(handler.filename).endswith(
        AbstractFileSystem.normpath('/c/d/assets/models/models.py'))
    # This is not allowed as we do go out of /assets/...
    assert_handled('/a/b/assets/foo/../../models/models.py', None)
    # Test negative cases
    assert_handled('/foo', None)
    assert_handled('/baz', None)
    # Site 'views' and 'data' are not accessible
    assert_handled('/a/b/view/base.html', None)
    assert_handled('/a/b/data/units.csv', None)
    # Default mapping
    reset_courses()
    handler3 = FakeHandler3
    handler4 = FakeHandler4
    urls = [
        ('/', handler0),
        ('/foo', handler1),
        ('/bar', handler2),
        ('/zip', handler3),
        ('/zip/a/b', handler4)]
    ApplicationRequestHandler.bind(urls)
    # Positive cases
    assert_handled('/', FakeHandler0)
    assert_handled('/foo', FakeHandler1)
    assert_handled('/bar', FakeHandler2)
    handler = assert_handled('/assets/js/main.js', AssetHandler)
    assert AbstractFileSystem.normpath(
        handler.app_context.get_template_home()).endswith(
            AbstractFileSystem.normpath('/views'))
    # Partial URL matching cases test that the most specific match is found.
    assert_handled('/zip', FakeHandler3)
    assert_handled('/zip/a', FakeHandler3)
    assert_handled('/zip/a/b', FakeHandler4)
    assert_handled('/zip/a/b/c', FakeHandler4)
    # Negative cases
    assert_handled('/baz', None)
    assert_handled('/favicon.ico', None)
    assert_handled('/e/f/index.html', None)
    assert_handled('/foo/foo.css', None)
    # Clean up: unbind everything so later tests see a fresh URL map.
    ApplicationRequestHandler.bind([])
def test_namespace_collisions_are_detected():
    """Test that namespace collisions are detected and are not allowed."""
    # '/c/d' and '/c-d' derive the same namespace name, so this must fail.
    setup_courses('foo:/a/b:/c/d, bar:/a/b:/c-d')
    assert_fails(get_all_courses)
    reset_courses()
def test_path_construction():
    """Checks that path_join() works correctly."""
    # Test cases common to all platforms.
    assert (os.path.normpath(path_join('/a/b', '/c')) ==
            os.path.normpath('/a/b/c'))
    assert (os.path.normpath(path_join('/a/b/', '/c')) ==
            os.path.normpath('/a/b/c'))
    assert (os.path.normpath(path_join('/a/b', 'c')) ==
            os.path.normpath('/a/b/c'))
    assert (os.path.normpath(path_join('/a/b/', 'c')) ==
            os.path.normpath('/a/b/c'))
    # Windows-specific test cases. splitdrive() only reports a drive letter
    # on Windows, so these assertions are skipped on other platforms.
    drive, unused_path = os.path.splitdrive('c:\\windows')
    if drive:
        assert (os.path.normpath(path_join('/a/b', 'c:/d')) ==
                os.path.normpath('/a/b/d'))
        assert (os.path.normpath(path_join('/a/b/', 'c:/d')) ==
                os.path.normpath('/a/b/d'))
def run_all_unit_tests():
    """Runs every unit test in this module in a fixed order."""
    assert not ApplicationRequestHandler.CAN_IMPERSONATE
    ApplicationContext.DEBUG_INFO = True
    ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE = True
    # Order is preserved from the original suite; several tests share
    # module-global configuration state.
    for test in (
            test_get_course_for_path,
            test_namespace_collisions_are_detected,
            test_unprefix,
            test_rule_definitions,
            test_url_to_rule_mapping,
            test_url_to_handler_mapping_for_course_type,
            test_path_construction,
            test_rule_validations):
        test()
# Allow this module's tests to be run directly from the command line.
if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for custom HTML tags."""
__author__ = 'John Orr (jorr@google.com)'
import logging
import mimetypes
import os
import re
from xml.etree import cElementTree
import html5lib
import lxml.html
import safe_dom
import webapp2
import appengine_config
from common import schema_fields
from models import config
# Admin-settable flag controlling whether custom HTML tags embedded in lesson
# content (e.g. <gcb-youtube>) may be expanded at render time.
CAN_USE_DYNAMIC_TAGS = config.ConfigProperty(
    'gcb_can_use_dynamic_tags', bool, safe_dom.Text(
        'Whether lesson content can make use of custom HTML tags such as '
        '<gcb-youtube videoid="...">. If this is enabled some legacy content '
        'may be rendered differently. '),
    default_value=True)
# Author-visible error shown when two tags on a page share one instanceid.
DUPLICATE_INSTANCE_ID_MESSAGE = (
    'Error processing custom HTML tag: duplicate tag id')
# Author-visible error prefix for a tag whose render() raised.
INVALID_HTML_TAG_MESSAGE = 'Invalid HTML tag'
class BaseTag(object):
    """Base class for the custom HTML tags."""
    @classmethod
    def name(cls):
        """Returns the tag's display name; defaults to the class name."""
        return cls.__name__
    @classmethod
    def vendor(cls):
        """Returns the tag's vendor string; defaults to the module name."""
        return cls.__module__
    @classmethod
    def required_modules(cls):
        """Lists the inputEx modules required by the editor."""
        return []
    @classmethod
    def extra_js_files(cls):
        """Returns a list of JS files to be loaded in the editor lightbox."""
        return []
    @classmethod
    def extra_css_files(cls):
        """Returns a list of CSS files to be loaded in the editor lightbox."""
        return []
    @classmethod
    def additional_dirs(cls):
        """Returns a list of directories searched for files used by the editor.

        These folders will be searched for files to be loaded as Jinja
        templates by the editor, e.g., the files referenced by extra_js_files
        and extra_css_files.

        Returns:
            List of strings.
        """
        return []
    def render(self, node, handler):  # pylint: disable=W0613
        """Receive a node and return a node.

        Args:
            node: cElementTree.Element. The DOM node for the tag which should be
                rendered.
            handler: controllers.utils.BaseHandler. The server runtime.

        Returns:
            A cElementTree.Element holding the rendered DOM.
        """
        return cElementTree.XML('<div>[Unimplemented custom tag]</div>')
    def get_icon_url(self):
        """Return the URL for the icon to be displayed in the rich text editor.

        Images should be placed in a folder called 'resources' inside the main
        package for the tag definitions.

        Returns:
            the URL for the icon to be displayed in the editor.
        """
        # Default icon is an inline data URI so no resource file is needed.
        return """
data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAYAAABXAvmHAAAAAXNSR0IArs
4c6QAAAAZiS0dEAP8A/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB90EGgAIFHpT6h
8AAAAZdEVYdENvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAC30lEQVRo3u1ZP2sqQRCfVVGUXC
FqoZAmbSBYxFikMojBD2ErkgdC/AxpAn4A2wRMKptgCrWwSApBEG2DCidcI0gIxogXnXnFI5I87y6Jd6
seOHDN7LL7+83u/Nk5hoh/wMTCEJHMTMDGGDMzfrCAyWVL4DdCZLy72YwCxhgDIoKXlxcQRREeHx9BFE
WYTqfg9XohGAxCKBSCnZ0dcDqdhlrFEKlWq8QYIwD49ovFYjQajYiICBF17auLACLSbDaj3d3dObizsz
Nqt9v09PRE8Xhck0gul9NtONADnojI7XbPAXW73YV55XJZk8TFxcX6TuDk5GQORBAE1StxeXmpSaJery
99lWBZ69dqtQUgpVJJcW6/39cksL+/v/oTiEajC0DsdjvNZjPF+Q6HQ5PEsrJ0Huj1egs6WZbh+flZcX
4kEtFcr1KprDaRybKsqL++vlbU+/1+zfVEUVwtAZ/Pp6h/f39X1COi5nqBQGC1iaxUKine5eFwqDg/Fo
tx8QFdYfTm5uYLiPv7e0JExZD4OV/8/+3t7a0vkcmyTJIk0Xg8Vs0Dr6+vmta/vb1dbR74rTw8PKiCPz
09XV8m/qmEQiFF8IeHh7oLOq4EEJGazaam5ddajf5ElKJPNps1BDxXAohIjUbjC3CPx0OTycTQfbiewO
f3QDKZ5LIHVwIf4PP5vGFXZmUErq6uCAAok8lw9TFuBFKp1LxE4GF53eX0d10KSZLg+Pj4X/+SY/ePCw
HGGIzHYzg6OuLfG+W18MHBAYTDYf7daeLRLtv2RrcE9DdvC4UC5PN5mE6n3DvGhtU+RETn5+cLxVsikT
BHIru7u1N9uKTTaS4EDItCiAhWq1V13OVywWg02lwfGA6HmuNvb2+b7cQWi8XcUUgQBPB6varjWmMbE0
Y7nY5q4VYsFs0RRvv9PgmCMI8+VquVWq0WtzBqaC308bMPAGAwGAAiqvZQt8XcthbaELGZ/AbBX0kdVa
SPB+uxAAAAAElFTkSuQmCC
"""
    def get_schema(self, unused_handler):
        """Return the list of fields which will be displayed in the editor.

        This method assembles the list of fields which will be displayed in
        the rich text editor when a user double-clicks on the icon for the tag.
        The fields are a list of SchemaField objects in a FieldRegistry
        container. Each SchemaField has the actual attribute name as used in
        the tag, the display name for the form, and the type (usually
        string).

        The schema field type of "text" plays a special role: a tag is allowed
        to have at most one field of type "text", and this is stored in the body
        of the tag, not as an attribute.

        Args:
            unused_handler: a request handler; if None is received, the request
                is being made by the system and there is no user in session; the
                minimal schema must be returned in this case; don't attempt to
                access course, app_context, file system, datastore, etc. in this
                case; if a valid handler object is received, the request is being
                made by a real user and schema can have additional data binding in
                it; for example: 'select_data' can be computed and set by accessing
                course, app_context, filesystem, datastore, etc.

        Returns:
            the list of fields to be displayed in the editor.
        """
        reg = schema_fields.FieldRegistry('Unimplemented Custom Tag')
        return reg
    def unavailable_schema(self, message):
        """Utility to generate a schema for a "not available" message."""
        reg = schema_fields.FieldRegistry(self.name())
        # Single read-only field showing the message; the 'visu' settings
        # invoke the editor's 'disableSave' hook so the form cannot be saved.
        reg.add_property(
            schema_fields.SchemaField(
                'unused_id', '', 'string', optional=True,
                editable=False, extra_schema_dict_values={
                    'value': message,
                    'visu': {
                        'visuType': 'funcName',
                        'funcName': 'disableSave'}}))
        return reg
class ContextAwareTag(BaseTag):
    """A tag which shares a context with other tags of the same type."""
    class Context(object):
        """Carries the environment and other data used by the tag."""
        def __init__(self, handler, env):
            """Initialize the context.

            Args:
                handler: controllers.utils.BaseHandler. The server runtime.
                env: dict. A dict of values shared between instances of
                    the tag on the same page. Values stored in this dict will be
                    available to subsequent calls to render() on the same page,
                    and to the call to rollup_header_footer() made at the end of
                    the page. Use this to store things like JS library refs
                    which can be de-dup'd and put in the header or footer.
            """
            self.handler = handler
            self.env = env
    def render(self, node, context):  # pylint: disable=W0613
        """Receive a node and return a node.

        Args:
            node: cElementTree.Element. The DOM node for the tag which should be
                rendered.
            context: Context. The context shared between instances of the tag.

        Returns:
            A cElementTree.Element holding the rendered DOM.
        """
        return super(ContextAwareTag, self).render(node, context.handler)
    def rollup_header_footer(self, context):
        """Roll up header and footer from data stored in the tag environment.

        This method is called once at the end of page processing. It receives
        the context object, which has been passed to all rendering methods for
        this tag on the page, and which accumulates data stored by the
        renderers.

        Args:
            context: Context. Holds data set in an environment dict by previous
                calls to render, containing, e.g., URLs of CSS or JS resources.

        Returns:
            A pair of cElementTree.Element's (header, footer).
        """
        # NOTE(review): subclasses must override this; the default body
        # returns None, not the (header, footer) pair callers unpack.
        pass
class ResourcesHandler(webapp2.RequestHandler):
    """Content handler for resources associated with custom tags."""

    def rebase_path(self, path):
        """Override this method to rebase the path to a different root."""
        return path

    def transform_resource(self, resource_str):
        """Override this method to apply a transformation to the resource."""
        return resource_str

    def get(self):
        """Respond to HTTP GET methods.

        Serves the file named by the (rebased, normalized) request path from
        under BUNDLE_ROOT, passing its content through transform_resource();
        responds 404 if the file cannot be read.
        """
        path = self.rebase_path(self.request.path)
        if path.startswith('/'):
            path = path[1:]
        path = os.path.normpath(path)
        resource_file = os.path.join(appengine_config.BUNDLE_ROOT, path)
        mimetype = mimetypes.guess_type(resource_file)[0]
        if mimetype is None:
            mimetype = 'application/octet-stream'
        try:
            self.response.status = 200
            self.response.headers['Content-Type'] = mimetype
            self.response.cache_control.no_cache = None
            self.response.cache_control.public = 'public'
            self.response.cache_control.max_age = 600
            # Bug fix: the original opened the file without ever closing it,
            # leaking the handle on every successful request. A context
            # manager guarantees the file is closed.
            with open(resource_file) as stream:
                self.response.write(self.transform_resource(stream.read()))
        except IOError:
            self.error(404)
class JQueryHandler(ResourcesHandler):
    """A content handler which serves jQuery scripts wrapped in $.ready()."""

    def transform_resource(self, resource_str):
        # Defer execution of the script until the DOM is fully loaded.
        return '$(function() {%s});' % (resource_str,)
class IifeHandler(ResourcesHandler):
    """A content handler which serves JavaScript wrapped in an immediately
    invoked function expression (IIFE).
    """

    def transform_resource(self, resource_str):
        # Isolate the script's names inside their own function scope.
        return '(function() {%s})();' % (resource_str,)
class EditorBlacklists(object):
    """Lists tags which should not be supported by various editors."""

    COURSE_SCOPE = set()
    ASSESSMENT_SCOPE = set()
    DESCRIPTIVE_SCOPE = set()

    @classmethod
    def register(cls, tag_name, editor_set):
        """Marks tag_name as unsupported for the given editor scope."""
        editor_set.add(tag_name)

    @classmethod
    def unregister(cls, tag_name, editor_set):
        """Removes tag_name from the given editor scope; no-op if absent."""
        editor_set.discard(tag_name)
class Registry(object):
    """A class that holds all dynamically registered tags."""

    _bindings = {}

    @classmethod
    def add_tag_binding(cls, tag_name, clazz):
        """Registers a tag name to class binding."""
        cls._bindings[tag_name] = clazz

    @classmethod
    def remove_tag_binding(cls, tag_name):
        """Unregisters a tag binding; no-op if the name is unknown."""
        cls._bindings.pop(tag_name, None)

    @classmethod
    def get_all_tags(cls):
        """Returns a shallow copy of the current tag bindings."""
        return dict(cls._bindings)
def get_tag_bindings():
    """Returns a fresh dict mapping tag names to their tag classes."""
    return dict(Registry.get_all_tags())
def html_string_to_element_tree(html_string):
    """Parses an HTML fragment into a cElementTree element rooted at a div."""
    tree_builder = html5lib.treebuilders.getTreeBuilder('etree', cElementTree)
    parser = html5lib.HTMLParser(
        tree=tree_builder, namespaceHTMLElements=False)
    fragment = parser.parseFragment('<div>%s</div>' % html_string)
    return fragment[0]
def html_to_safe_dom(html_string, handler, render_custom_tags=True):
    """Render HTML text as a tree of safe_dom elements.

    Args:
        html_string: str. HTML source, possibly containing custom tags.
        handler: controllers.utils.BaseHandler. The server runtime, passed
            through to each custom tag's render() method.
        render_custom_tags: bool. When False, registered custom tags are
            copied through as-is instead of being expanded.

    Returns:
        A safe_dom.NodeList holding the rendered DOM.
    """
    tag_bindings = get_tag_bindings()
    node_list = safe_dom.NodeList()
    if not html_string:
        return node_list
    # Set of all instance id's used in this dom tree, used to detect duplication
    used_instance_ids = set([])
    # A dictionary of environments, one for each tag type which appears in the
    # page
    tag_contexts = {}
    def _generate_error_message_node_list(elt, error_message):
        """Generates a node_list representing an error message."""
        logging.error(
            '[%s, %s]: %s.', elt.tag, dict(**elt.attrib), error_message)
        node_list = safe_dom.NodeList()
        node_list.append(safe_dom.Element(
            'span', className='gcb-error-tag'
        ).add_text(error_message))
        # Preserve any text that followed the failing element.
        if elt.tail:
            node_list.append(safe_dom.Text(elt.tail))
        return node_list
    def _remove_namespace(tag_name):
        # Remove any namespacing which html5lib may have introduced. Html5lib
        # namespacing is of the form, e.g.,
        # {http://www.w3.org/2000/svg}svg
        return re.sub(r'^\{[^\}]+\}', '', tag_name, count=1)
    def _process_html_tree(elt):
        """Recursively parses an HTML tree into a safe_dom.NodeList()."""
        # Return immediately with an error message if a duplicate instanceid is
        # detected.
        if 'instanceid' in elt.attrib:
            if elt.attrib['instanceid'] in used_instance_ids:
                return _generate_error_message_node_list(
                    elt, DUPLICATE_INSTANCE_ID_MESSAGE)
            used_instance_ids.add(elt.attrib['instanceid'])
        # Otherwise, attempt to parse this tag and all its child tags.
        # Keep a reference to the pre-render element: its tail text and
        # identity are needed even after a custom tag replaces elt.
        original_elt = elt
        try:
            if render_custom_tags and elt.tag in tag_bindings:
                tag = tag_bindings[elt.tag]()
                if isinstance(tag, ContextAwareTag):
                    # Get or initialize a environment dict for this type of tag.
                    # Each tag type gets a separate environment shared by all
                    # instances of that tag.
                    context = tag_contexts.get(elt.tag)
                    if context is None:
                        context = ContextAwareTag.Context(handler, {})
                        tag_contexts[elt.tag] = context
                    # Render the tag
                    elt = tag.render(elt, context)
                else:
                    # Render the tag
                    elt = tag.render(elt, handler)
            # Map the (possibly re-rendered) element onto a safe_dom node.
            if elt.tag == cElementTree.Comment:
                out_elt = safe_dom.Comment()
            elif elt.tag.lower() == 'script':
                out_elt = safe_dom.ScriptElement()
            else:
                out_elt = safe_dom.Element(_remove_namespace(elt.tag))
            out_elt.add_attribute(**elt.attrib)
            if elt.text:
                out_elt.add_text(elt.text)
            for child in elt:
                out_elt.add_children(
                    _process_html_tree(child))
            node_list = safe_dom.NodeList()
            node_list.append(out_elt)
            if original_elt.tail:
                node_list.append(safe_dom.Text(original_elt.tail))
            return node_list
        except Exception as e:  # pylint: disable=broad-except
            # A failing tag degrades to an inline error; page still renders.
            logging.exception('Error handling tag: %s', elt.tag)
            return _generate_error_message_node_list(
                original_elt, '%s: %s' % (INVALID_HTML_TAG_MESSAGE, e))
    root = html_string_to_element_tree(html_string)
    if root.text:
        node_list.append(safe_dom.Text(root.text))
    for child_elt in root:
        node_list.append(_process_html_tree(child_elt))
    # After the page is processed, rollup any global header/footer data which
    # the environment-aware tags have accumulated in their env's
    for tag_name, context in tag_contexts.items():
        header, footer = tag_bindings[tag_name]().rollup_header_footer(context)
        node_list.insert(0, _process_html_tree(header))
        node_list.append(_process_html_tree(footer))
    return node_list
def get_components_from_html(html):
    """Returns a list of dicts representing the components in a lesson.

    Args:
        html: a block of html that may contain some HTML tags representing
            custom components.

    Returns:
        A list of dicts, one per component found. Each dict carries the
        component's attributes plus:
        - instanceid: the instance id of the component
        - cpt_name: the name of the component tag (e.g. gcb-googlegroup)
    """
    root = lxml.html.fromstring('<div>%s</div>' % html)
    found = []
    # Any element bearing an 'instanceid' attribute is a component.
    for node in root.xpath('.//*[@instanceid]'):
        info = {'cpt_name': node.tag}
        info.update(node.attrib)
        found.append(info)
    return found
| Python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unified method of referring to to heterogenous resources in courses"""
__author__ = 'Mike Gainer (mgainer@google.com)'
class AbstractResourceHandler(object):
"""Unified accessor for heterogenous resources within CourseBuilder.
CourseBuilder contains a number of different resources, such as
questions, units, lessons, course settings, etc. There are a number
of features that are concerned with acting on some or all of these
types, and would like to do so polymorphically. (E.g., I18N,
skill mapping, and other 3rd-party modules).
"""
# Derived classes must set TYPE to a short, globally-unique string. This
# string may only contain lowercase letters, numbers, and underscores.
TYPE = None
@classmethod
def get_key(cls, instance):
"""Returns a key for the given instance.
Args:
instance: And instance of a Course Builder resource.
Returns:
A Key for that instance.
"""
raise NotImplementedError('Derived classes must implement this.')
@classmethod
def get_resource(cls, course, key):
"""Returns an instance of the resource type.
Args:
course: A courses.Course instance
key: A small fact (string or integer, typically) representing
the primary key for the desired instance.
Returns:
A loaded instance of the type appropriate for the Handler subtype.
Note that this can be very broadly interpreted. For example,
since it is so common to need the Unit corresponding to a Lesson,
this function in ResourceLesson returns a 2-tuple of the unit
and lesson, rather than just the lesson.
"""
raise NotImplementedError('Derived classes must implement this.')
@classmethod
def get_resource_title(cls, resource):
"""Get a title for the resource.
Args:
resource: Whatever is returned from get_resource() (q.v.)
Returns:
A short human-friendly string for titling the resource.
NOTE: This string is not I18N'd - it is the actual string
from the resource, before translation. This string is
suitable for display in dashboard contexts, where it is
OK to presume a reasonable working knowledge of English,
but not on student-facing pages.
"""
raise NotImplementedError('Derived classes must implement this.')
@classmethod
def get_schema(cls, course, key):
"""Return a schema describing the value returned from get_data_dict().
Again, note that in theory, the specific identity of the item in
question should not be required to get what should be a generic
schema. The difference between theory and practice....
Args:
course: A courses.Course instance.
key: A small fact (string or integer, typically) representing
the primary key for the desired instance.
Returns:
A schema_fields.FieldRegistry instance.
"""
raise NotImplementedError('Derived classes must implement this.')
    @classmethod
    def get_data_dict(cls, course, key):
        """Return a simple dict expression of the object's data.

        This is typically used in REST editors and other similar import/
        export related scenarios.

        Args:
            course: A courses.Course instance.
            key: A small fact (string or integer, typically) representing
                the primary key for the desired instance.
        Returns:
            A dict corresponding to the schema from get_schema().
        Raises:
            NotImplementedError: always; derived classes must override.
        """
        raise NotImplementedError('Derived classes must implement this.')
    @classmethod
    def get_view_url(cls, resource):
        """Return a URL that will show a student view of the item.

        Not all classes need to return a reasonable value here.  For
        example, Labels and Skills may just not have a simple
        student-visible representation.  It is fine in those cases to
        return None; the caller must deal with this situation
        appropriately.

        Args:
            resource: Whatever is returned from get_resource() (q.v.)
        Returns:
            A *relative* URL.  E.g., dashboard?action=foo&tab=bar  Such a
            URL can be placed unmodified on a page which has been set
            up with the default URL prefix pointing to the namespace for
            the current course.
        Raises:
            NotImplementedError: always; derived classes must override.
        """
        raise NotImplementedError('Derived classes must implement this.')
    @classmethod
    def get_edit_url(cls, key):
        """Return a dashboard URL for editing the resource.

        All classes should implement this function.  If it is hard to
        implement this, then you may have made a poor selection as to
        the noun that you're trying to represent.

        Args:
            key: A small fact (string or integer, typically) representing
                the primary key for the desired instance.
        Returns:
            A *relative* URL.  E.g., dashboard?action=foo&tab=bar  Such a
            URL can be placed unmodified on a page which has been set
            up with the default URL prefix pointing to the namespace for
            the current course.
        Raises:
            NotImplementedError: always; derived classes must override.
        """
        raise NotImplementedError('Derived classes must implement this.')
class Registry(object):
    """Central lookup table mapping resource type names to handler classes."""

    _RESOURCE_HANDLERS = {}

    @classmethod
    def register(cls, resource_handler):
        """Register a handler class wishing to be generically handled.

        Args:
            resource_handler: A class that inherits from
                AbstractResourceHandler, above.
        Raises:
            ValueError: if the handler's TYPE name is already taken.
        """
        type_name = resource_handler.TYPE
        if type_name in cls._RESOURCE_HANDLERS:
            raise ValueError(
                'The type name "%s" is already registered as a resource.' %
                type_name)
        cls._RESOURCE_HANDLERS[type_name] = resource_handler

    @classmethod
    def get(cls, name):
        """Return the handler registered under 'name' or raise ValueError."""
        if name not in cls._RESOURCE_HANDLERS:
            raise ValueError('Unknown resource type: %s' % name)
        return cls._RESOURCE_HANDLERS[name]

    @classmethod
    def is_valid_name(cls, name):
        """Whether 'name' names a registered resource type."""
        return name in cls._RESOURCE_HANDLERS
class Key(object):
    """Manages key for Course Builder resource.

    Every Course Builder resource can be identified by a type name plus a
    type-contextual key.  Instances hold that pair (and optionally the
    owning course) and serialize to/from the "<type>:<key>" string form.
    """

    def __init__(self, type_str, key, course=None):
        self._type = type_str
        self._key = key
        self._course = course
        assert Registry.is_valid_name(self._type), (
            'Unknown resource type: %s' % type_str)

    def __str__(self):
        return '%s:%s' % (self._type, self._key)

    @classmethod
    def fromstring(cls, key_str):
        """Parse a "<type>:<key>" string as produced by __str__."""
        # Split only on the first colon; the key part may contain colons.
        type_part, key_part = key_str.split(':', 1)
        return Key(type_part, key_part)

    @property
    def type(self):
        return self._type

    @property
    def key(self):
        return self._key

    def get_resource(self, course):
        # Fall back to the course bound at construction time, if any.
        effective_course = course or self._course
        return Registry.get(self._type).get_resource(
            effective_course, self._key)

    def get_schema(self, course):
        return Registry.get(self._type).get_schema(course, self._key)

    def get_data_dict(self, course):
        return Registry.get(self._type).get_data_dict(course, self._key)
| Python |
# -*- coding: utf-8; -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions used for Course Builder locale support."""
__author__ = 'John Orr (jorr@google.com)'
import logging
import os
import re
import appengine_config
# Locale description information based on Babel Locale.display_name. However
# the names are collected here because (i) Babel does not have correct display
# names for all of the locales included, and (ii) Babel cannot access its
# localeinfo data when it is loaded as a Zip file.
LOCALES_DISPLAY_NAMES = {
'af': u'Afrikaans (af)',
'am': u'አማርኛ (am)',
'ar': u'العربية (ar)',
'bg': u'български (bg)',
'bn': u'বাংলা (bn)',
'ca': u'català (ca)',
'cs': u'čeština (cs)',
'da': u'dansk (da)',
'de': u'Deutsch (de)',
'el': u'Ελληνικά (el)',
'en_GB': u'British English (en_GB)',
'en_US': u'U.S. English (en_US)',
'es': u'español (es)',
'et': u'eesti (et)',
'eu': u'euskara (eu)',
'fa': u'فارسی (fa)',
'fi': u'suomi (fi)',
'fil': u'Filipino (fil)',
'fr': u'français (fr)',
'gl': u'galego (gl)',
'gu': u'ગુજરાતી (gu)',
'hi': u'हिन्दी (hi)',
'hr': u'hrvatski (hr)',
'hu': u'magyar (hu)',
'id': u'Bahasa Indonesia (id)',
'is': u'íslenska (is)',
'it': u'italiano (it)',
'iw': u'עברית (iw)', # Former ISO-639 code for Hebrew; should now be he
'ja': u'日本語 (ja)',
'kn': u'ಕನ್ನಡ (kn)',
'ko': u'한국어 (ko)',
'ln': u'Fake Translation (ln)',
'lt': u'lietuvių (lt)',
'lv': u'latviešu (lv)',
'ml': u'മലയാളം (ml)',
'mr': u'मराठी (mr)',
'ms': u'Bahasa Melayu (ms)',
'nl': u'Nederlands (nl)',
    'no': u'Nynorsk (no)',  # Correct ISO-639-1 is nn and ISO-639-2 is nno
'pl': u'polski (pl)',
'pt_BR': u'português do Brasil (pt_BR)',
'pt_PT': u'português europeu (pt_PT)',
'ro': u'română (ro)',
'ru': u'русский (ru)',
'sk': u'slovenský (sk)',
'sl': u'slovenščina (sl)',
'sr': u'Српски (sr)',
'sv': u'svenska (sv)',
'sw': u'Kiswahili (sw)',
'ta': u'தமிழ் (ta)',
'te': u'తెలుగు (te)',
'th': u'ไทย (th)',
'tr': u'Türkçe (tr)',
'uk': u'українська (uk)',
'ur': u'اردو (ur)',
'vi': u'Tiếng Việt (vi)',
'zh_CN': u'中文 (简体) (zh_CN)', # Chinese (Simplified)
'zh_TW': u'中文 (繁體) (zh_TW)', # Chinese (Traditional)
'zu': u'isiZulu (zu)',
}
def get_system_supported_locales():
    """Return sorted locale codes bundled with the system, plus fake 'ln'."""
    locale_dir = os.path.join(
        appengine_config.BUNDLE_ROOT, 'modules/i18n/resources/locale')
    # 'ln' is the fake "translation" locale used for i18n testing.
    return sorted(os.listdir(locale_dir) + ['ln'])
def get_locale_display_name(locale):
    """Return the human-readable name for a locale code, or the code itself."""
    return LOCALES_DISPLAY_NAMES.get(locale, locale)
def parse_accept_language(accept_language_str):
    """Parse an RFC 2616 Accept-Language string.

    Accept-Language strings are of the form
        en-US,en;q=0.8,el;q=0.6
    where each language range (en-US, en, el) may be followed by a quality
    score (q).  So in the example, US English has the default quality score
    (1), English has quality score 0.8, and Greek has quality score 0.6.

    Args:
        accept_language_str: str. A string in RFC 2616 format. If the string
            is None or empty, an empty list is returned.
    Returns:
        A list of pairs. The first element of the pair is the normalized
        language code (a str, e.g. 'en_US') and the second element is a
        float between 0 and 1. The list is sorted in decreasing order by
        q, so that the highest quality language is the first element of
        the list. Entries that are not purely alphabetic are skipped; if
        the header as a whole is unparseable, an empty list is returned.
    Refs:
        http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14
    """
    if not accept_language_str:
        return []
    # Guard against absurdly long headers from hostile clients.
    assert len(accept_language_str) < 8 * 1024
    parsed = []
    try:
        for item in accept_language_str.split(','):
            lang = item.strip()
            q = 1.0
            if ';' in item:
                lang, q_str = item.split(';')
                # Bug fix: strip whitespace left by the comma split;
                # previously "en-US, en;q=0.8" silently dropped "en"
                # because the unstripped " en" failed the regex below.
                lang = lang.strip()
                q_str = q_str.strip()
                q = float(q_str[2:]) if q_str.startswith('q=') else float(
                    q_str)
            components = lang.split('-')
            if not all([re.match('^[a-zA-Z]+$', c) for c in components]):
                continue
            # Normalize to 'll_RR' form, e.g. en-us -> en_US.
            lang = '_'.join(
                [components[0].lower()] + [c.upper() for c in components[1:]])
            parsed.append((lang, q))
        # Bug fix: pass the key function by keyword.  The old positional
        # form relied on Python 2's (iterable, cmp, key) signature and is
        # a TypeError under Python 3; key= behaves identically on both.
        return sorted(parsed, key=lambda pair: -pair[1])
    except Exception:  # pylint: disable=broad-except
        logging.exception('Bad Accept-Language: %s', accept_language_str)
        return []
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom Jinja2 filters used in Course Builder."""
__author__ = 'John Orr (jorr@google.com)'
import sys
import traceback
import jinja2
import safe_dom
import tags
from webapp2_extras import i18n
import appengine_config
from common import caching
from models import config
from models import models
from models.counters import PerfCounter
# Max size in bytes for the in-process jinja template bytecode cache.
MAX_GLOBAL_CACHE_SIZE_BYTES = 8 * 1024 * 1024
# Admin-settable switch; this cache used to be memcache based, now in-process.
CAN_USE_JINJA2_TEMPLATE_CACHE = config.ConfigProperty(
    'gcb_can_use_jinja2_template_cache', bool, safe_dom.Text(
        'Whether jinja2 can cache bytecode of compiled templates in-process.'),
    default_value=True)
def finalize(x):
    """Jinja finalizer that renders safe_dom nodes as pre-sanitized markup."""
    if isinstance(x, (safe_dom.Node, safe_dom.NodeList)):
        # safe_dom values carry their own escaping; mark them markup-safe.
        return jinja2.utils.Markup(x.sanitized)
    return x
def js_string_raw(data):
    """Escape a string so that it can be put in a JS quote."""
    if not isinstance(data, basestring):
        return data
    # Order matters: backslashes must be doubled first, before any of the
    # escape sequences below introduce new backslashes.
    replacements = (
        ('\\', '\\\\'),
        ('\r', '\\r'),
        ('\n', '\\n'),
        ('\b', '\\b'),
        ('"', '\\"'),
        ("'", "\\'"),
        ('<', '\\u003c'),
        ('>', '\\u003e'),
        ('&', '\\u0026'),
    )
    for old, new in replacements:
        data = data.replace(old, new)
    return data
def js_string(data):
    """Escape data for a JS string literal and mark the result markup-safe."""
    return jinja2.utils.Markup(js_string_raw(data))
def get_gcb_tags_filter(handler):
    """Build a Jinja filter that expands GCB custom tags using 'handler'."""

    @appengine_config.timeandlog('get_gcb_tags_filter')
    def gcb_tags(data):
        """Apply GCB custom tags, if enabled. Otherwise pass as if by 'safe'."""
        data = unicode(data)
        if tags.CAN_USE_DYNAMIC_TAGS.value:
            return jinja2.utils.Markup(tags.html_to_safe_dom(data, handler))
        else:
            return jinja2.utils.Markup(data)
    return gcb_tags
class ProcessScopedJinjaCache(caching.ProcessScopedSingleton):
    """This class holds in-process cache of Jinja compiled templates."""

    @classmethod
    def get_cache_len(cls):
        # Number of entries currently cached; polled by a PerfCounter.
        return len(ProcessScopedJinjaCache.instance().cache.items.keys())

    @classmethod
    def get_cache_size(cls):
        # Total byte size of cached entries; polled by a PerfCounter.
        return ProcessScopedJinjaCache.instance().cache.total_size

    def __init__(self):
        self.cache = caching.LRUCache(
            max_size_bytes=MAX_GLOBAL_CACHE_SIZE_BYTES)
        # Override the LRU's sizing hook so eviction accounting reflects
        # actual key/value byte sizes rather than entry counts.
        self.cache.get_entry_size = self._get_entry_size

    def _get_entry_size(self, key, value):
        # Shallow sizes only; sys.getsizeof does not follow references.
        return sys.getsizeof(key) + sys.getsizeof(value)
class JinjaBytecodeCache(jinja2.BytecodeCache):
    """Jinja-compatible cache backed by global in-process Jinja cache."""

    def __init__(self, prefix):
        # Namespacing prefix so different courses don't share bytecode.
        self.prefix = prefix

    def load_bytecode(self, bucket):
        """Populate 'bucket' from the shared cache, if an entry exists."""
        found, _bytes = ProcessScopedJinjaCache.instance().cache.get(
            self.prefix + bucket.key)
        if found and _bytes is not None:
            bucket.bytecode_from_string(_bytes)

    def dump_bytecode(self, bucket):
        """Store the bucket's compiled bytecode into the shared cache."""
        _bytes = bucket.bytecode_to_string()
        ProcessScopedJinjaCache.instance().cache.put(
            self.prefix + bucket.key, _bytes)
# Perf counters exposing the Jinja bytecode cache's entry count and byte
# size; values are polled on demand from the process-scoped cache singleton.
JINJA_CACHE_LEN = PerfCounter(
    'gcb-models-JinjaBytecodeCache-len',
    'A total number of items in Jinja cache.')
JINJA_CACHE_SIZE_BYTES = PerfCounter(
    'gcb-models-JinjaBytecodeCache-bytes',
    'A total size of items in Jinja cache in bytes.')
JINJA_CACHE_LEN.poll_value = ProcessScopedJinjaCache.get_cache_len
JINJA_CACHE_SIZE_BYTES.poll_value = ProcessScopedJinjaCache.get_cache_size
def create_jinja_environment(loader, locale=None, autoescape=True):
    """Create proper jinja environment.

    Args:
        loader: a jinja2 loader used to locate template files.
        locale: optional locale code; when set, gettext translations for
            that locale are installed in the environment.
        autoescape: whether the environment should HTML-escape by default.
    Returns:
        A configured jinja2.Environment.
    """
    cache = None
    if CAN_USE_JINJA2_TEMPLATE_CACHE.value:
        prefix = 'jinja2:bytecode:%s:/' % models.MemcacheManager.get_namespace()
        cache = JinjaBytecodeCache(prefix)
    jinja_environment = jinja2.Environment(
        autoescape=autoescape, finalize=finalize,
        extensions=['jinja2.ext.i18n'], bytecode_cache=cache, loader=loader)
    jinja_environment.filters['js_string'] = js_string
    if locale:
        i18n.get_i18n().set_locale(locale)
        jinja_environment.install_gettext_translations(i18n)
    old_handle_exception = jinja_environment.handle_exception

    def _handle_exception(exc_info=None, rendered=False, source_hint=None):
        """Log the template exception, then defer to jinja's own handler."""
        # Bug fix: traceback.print_exc() takes (limit, file); passing the
        # exc_info tuple as 'limit' was an API misuse.  Resolve exc_info
        # and print it explicitly instead.
        if exc_info is None:
            exc_info = sys.exc_info()
        traceback.print_exception(*exc_info)
        result = old_handle_exception(exc_info, rendered, source_hint)
        return result

    jinja_environment.handle_exception = _handle_exception
    return jinja_environment
def get_template(
    template_name, dirs, handler=None, autoescape=True):
    """Sets up an environment and gets jinja template.

    Args:
        template_name: file name of the template, relative to 'dirs'.
        dirs: list of directories to search for the template file.
        handler: optional request handler; used by the gcb_tags filter
            when rendering GCB custom tags.
        autoescape: whether the environment should HTML-escape by default.
    Returns:
        The compiled jinja2 template.
    """
    # Defer to avoid circular import.
    from controllers import sites
    # Pick the most specific locale available: the current user's locale,
    # then the course default, then US English.
    locale = None
    app_context = sites.get_course_for_current_request()
    if app_context:
        locale = app_context.get_current_locale()
        if not locale:
            locale = app_context.default_locale
    if not locale:
        locale = 'en_US'
    jinja_environment = create_jinja_environment(
        jinja2.FileSystemLoader(dirs), locale=locale, autoescape=autoescape)
    jinja_environment.filters['gcb_tags'] = get_gcb_tags_filter(handler)
    return jinja_environment.get_template(template_name)
| Python |
#!/usr/bin/python
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for processing various .yaml files in CourseBuilder installations."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import copy
import re
import yaml
# Top-level yaml section names that get blank lines inserted before them
# when serializing, for readability (see CourseBuilderYamlFormatter.emit).
NEWLINE_BEFORE_YAML_SECTIONS = set([
    'env_variables',
    'includes',
    'inbound_services',
    'builtins',
    'libraries',
    'handlers',
    ])
class CourseBuilderYamlFormatter(yaml.Dumper):
    """Custom formatter to generate CourseBuilder style in yaml files."""

    def __init__(self, *args, **kwargs):
        super(CourseBuilderYamlFormatter, self).__init__(*args, **kwargs)
        self.best_width = 0  # Minimize line merging

    # Add newlines before major sections for good visual parsing.
    def emit(self, item):
        if (isinstance(item, yaml.ScalarEvent) and
            str(item.value) in NEWLINE_BEFORE_YAML_SECTIONS):
            self.write_line_break()
            self.write_line_break()
        super(CourseBuilderYamlFormatter, self).emit(item)

    # For very long lines, don't leave 1st item in element on same line
    # as name of element; instead, move to next line so all parts have
    # the same indent.  (E.g., for GCB_REGISTERED_MODULES list)
    def write_plain(self, text, split):
        # NOTE(review): the ' ' in text condition also breaks any value
        # containing a space onto its own line -- presumably intentional
        # for the module-list style; confirm before changing.
        if len(text) > 80 or ' ' in text:
            self.write_line_break()
            self.write_indent()
        super(CourseBuilderYamlFormatter, self).write_plain(text, split)
class AppYamlFile(object):
    """Parse, modify, and write app.yaml file."""

    def __init__(self, name):
        # Path to the app.yaml file; parsing is deferred to first access.
        self._name = name
        self._loaded = False

    def _lazy_load(self):
        """Parse the yaml file into a node tree on first access."""
        if self._loaded:
            return
        with open(self._name) as fp:
            # yaml.compose keeps the node tree rather than plain Python
            # values, so edits can be serialized back out faithfully.
            self._root = yaml.compose(fp)
        # Root value is a list of 2-tuples for name/value of top-level
        # items in yaml file.
        # NOTE(review): the attributes below are only assigned when their
        # sections exist; assumes app.yaml always contains env_variables,
        # libraries and application sections -- TODO confirm.
        for item in self._root.value:
            if item[0].value == 'env_variables':
                self._env_vars = item[1].value
            if item[0].value == 'libraries':
                self._library_list = item[1].value
            if item[0].value == 'application':
                self._application = item[1].value
        # Libraries item is a list of name/value 2-tuples.
        # Extract name and version for each library.
        self._lib_versions = {}
        for lib_spec in self._library_list:
            name = None
            vers = None
            for lib_item in lib_spec.value:
                if lib_item[0].value == 'name':
                    name = lib_item[1].value
                elif lib_item[0].value == 'version':
                    vers = lib_item[1].value
            if name and vers:
                self._lib_versions[name] = vers
        self._loaded = True

    def write(self):
        """Serialize the (possibly modified) node tree back to disk."""
        self._lazy_load()
        content = yaml.serialize(self._root, stream=None,
                                 Dumper=CourseBuilderYamlFormatter)
        with open(self._name, 'w') as fp:
            fp.write(content)

    def require_library(self, library, version):
        """Add tree nodes for new library if it is not already called for.

        Args:
            library: name of the App Engine library to require.
            version: exact version string the library must have.
        Returns:
            True if the library was added; False if it was already present
            at the same version.
        Raises:
            ValueError: if the library is already required at a different
                version.
        """
        self._lazy_load()
        if library in self._lib_versions:
            if version != self._lib_versions[library]:
                raise ValueError(
                    'Library "%s" is already required ' % library +
                    'at version "%s". ' % self._lib_versions[library] +
                    'Cannot satisfy request for version "%s".' % version)
            return False
        # Clone an existing entry to inherit well-formed child nodes, then
        # overwrite its scalars.  NOTE(review): assumes the first entry's
        # first two children are 'name' then 'version' -- TODO confirm.
        added_lib = copy.deepcopy(self._library_list[0])
        added_lib.value[0][1].value = library
        added_lib.value[1][1].value = version
        self._library_list.append(added_lib)
        # Keep the libraries list sorted by library name.
        self._library_list.sort(key=lambda x: x.value[0][1].value)
        return True

    def set_env(self, var_name, var_value):
        """Set, replace, or (when the value is blank) remove an env var."""
        self._lazy_load()
        var_value = var_value.strip()
        env_var = None
        for member in self._env_vars:
            if member[0].value == var_name:
                env_var = member
                break
        if var_value:
            if not env_var:
                env_var_name = yaml.ScalarNode('tag:yaml.org,2002:str',
                                               var_name)
                env_var_value = yaml.ScalarNode('tag:yaml.org,2002:str',
                                                var_value)
                env_var = (env_var_name, env_var_value)
                self._env_vars.append(env_var)
            else:
                env_var[1].value = var_value
        else:
            # A blank value means "unset": drop the variable if present.
            if env_var:
                self._env_vars.remove(env_var)

    def get_env(self, var_name):
        """Return the value of env variable 'var_name', or None if unset."""
        self._lazy_load()
        for env_var in self._env_vars:
            if env_var[0].value == var_name:
                return env_var[1].value
        return None

    def get_all_env(self):
        """Return all env variables as a plain {name: value} dict."""
        self._lazy_load()
        ret = {}
        for env_var in self._env_vars:
            ret[env_var[0].value] = env_var[1].value
        return ret

    @property
    def application(self):
        # The 'application' setting from app.yaml.
        self._lazy_load()
        return self._application
class ModuleManifest(object):
    """Parse module.yaml files into object providing convienent properties."""

    def __init__(self, path):
        # Path to the module.yaml file; parsing is deferred to first use.
        self._path = path
        self._loaded = False

    def _lazy_load(self):
        """Parse and validate the manifest on first access."""
        if self._loaded:
            return
        with open(self._path) as fp:
            # NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary Python objects.  Acceptable for
            # repo-local module.yaml files, but never point this at
            # untrusted input; prefer yaml.safe_load if that changes.
            module_spec = yaml.load(fp)
        self._main_module = module_spec['module_name']
        parts = self._main_module.split('.')
        if parts[0] != 'modules' or len(parts) < 2:
            raise ValueError(
                'module_name is expected to name the main python file '
                'under CourseBuilder as: modules.<module>.<filename>')
        self._module_name = parts[1]
        self._required_version = module_spec['container_version']
        self._third_party_libraries = module_spec.get(
            'third_party_libraries', {})
        self._appengine_libraries = module_spec.get(
            'appengine_libraries', {})
        self._tests = module_spec['tests']
        self._loaded = True

    def assert_version_compatibility(self, actual_version):
        """Raise ValueError if CourseBuilder is older than the module needs.

        Version strings are split on '.' and '-' and compared
        component-wise; comparison stops at the first component where the
        actual version is newer than required.
        """
        self._lazy_load()
        for required, actual in zip(re.split(r'[-.]', self._required_version),
                                    re.split(r'[-.]', actual_version)):
            if int(required) < int(actual):
                break
            if int(required) > int(actual):
                raise ValueError(
                    'Current CourseBuilder version %s ' % actual_version +
                    'is less than the version %s ' % self._required_version +
                    'required by module %s' % self._module_name)

    @property
    def module_name(self):
        # Short module name, e.g. 'foo' for 'modules.foo.bar'.
        self._lazy_load()
        return self._module_name

    @property
    def main_module(self):
        # Fully-qualified dotted name, e.g. 'modules.foo.bar'.
        self._lazy_load()
        return self._main_module

    @property
    def third_party_libraries(self):
        self._lazy_load()
        return self._third_party_libraries

    @property
    def appengine_libraries(self):
        self._lazy_load()
        return self._appengine_libraries

    @property
    def tests(self):
        self._lazy_load()
        return self._tests
| Python |
"""Classes to build sanitized HTML."""
__author__ = 'John Orr (jorr@google.com)'
import cgi
import re
def escape(strg):
    """HTML-escape strg, also neutralizing single quotes and backticks.

    cgi.escape handles &, <, > and (with quote=1) double quotes; single
    quotes and backticks are additionally escaped here as numeric
    character references so the result is safe inside any attribute
    quoting style.
    """
    # Bug fix: replace the quote characters with their character
    # references; replacing a character with itself (as the corrupted
    # source did) is a no-op and leaves attributes injectable.
    return cgi.escape(strg, quote=1).replace(
        "'", '&#39;').replace('`', '&#96;')
class Node(object):
    """Base class for the sanitizing module."""

    def __init__(self):
        # Set when this node is added to a parent Element or NodeList.
        self._parent = None

    def _set_parent(self, parent):
        self._parent = parent

    @property
    def parent(self):
        return self._parent

    @property
    def sanitized(self):
        """Subclasses return their safely-escaped HTML string here."""
        raise NotImplementedError()

    def __str__(self):
        return self.sanitized
# pylint: disable=incomplete-protocol
class NodeList(object):
    """Holds a list of Nodes and can bulk sanitize them."""

    def __init__(self):
        self.list = []
        self._parent = None

    def __len__(self):
        return len(self.list)

    def __str__(self):
        return self.sanitized

    def _set_parent(self, parent):
        assert self != parent
        self._parent = parent

    @property
    def parent(self):
        return self._parent

    @property
    def children(self):
        # Shallow copy, so callers cannot mutate our internal list.
        return list(self.list)

    @property
    def sanitized(self):
        # Concatenate each child's sanitized rendering in order.
        return ''.join(node.sanitized for node in self.list)

    def append(self, node):
        """Append a node and adopt it; returns self for chaining."""
        assert node is not None, 'Cannot add an empty value to the node list'
        self.list.append(node)
        node._set_parent(self)  # pylint: disable=protected-access
        return self

    def insert(self, index, node):
        """Insert a node at 'index' and adopt it; returns self."""
        assert node is not None, 'Cannot add an empty value to the node list'
        self.list.insert(index, node)
        node._set_parent(self)  # pylint: disable=protected-access
        return self

    def delete(self, node):
        """Remove every child equal to 'node'."""
        self.list = [child for child in self.list if child != node]

    def empty(self):
        """Remove all children; returns self."""
        self.list = []
        return self
class Text(Node):
    """Holds untrusted text which will be sanitized when accessed."""

    def __init__(self, unsafe_string):
        super(Text, self).__init__()
        # Coerce to unicode up front so escaping always sees text.
        self._value = unicode(unsafe_string)

    @property
    def sanitized(self):
        # Escape lazily on access; the raw value is kept verbatim.
        return escape(self._value)
class Comment(Node):
    """An HTML comment."""

    def __init__(self, unsafe_string=''):
        super(Comment, self).__init__()
        self._value = unicode(unsafe_string)

    def get_value(self):
        return self._value

    def add_text(self, unsafe_string):
        # Comments accumulate text rather than child nodes.
        self._value += unicode(unsafe_string)

    def add_attribute(self, **attr):
        # Comments cannot carry attributes; silently accept and discard
        # them so callers may treat all node types uniformly.
        pass

    @property
    def sanitized(self):
        return '<!--%s-->' % escape(self._value)
class Element(Node):
    """Embodies an HTML element which will be sanitized when accessed."""

    # Tag and attribute names must look like simple identifiers.
    _ALLOWED_NAME_PATTERN = re.compile(r'^[a-zA-Z][_\-a-zA-Z0-9]*$')
    # HTML5 void elements: may self-close and never take children.
    _VOID_ELEMENTS = frozenset([
        'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen',
        'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr'])

    def __init__(self, tag_name, **attr):
        """Initializes an element with given tag name and attributes.

        Tag name will be restricted to alpha chars, attribute names
        will be quote-escaped.

        Args:
            tag_name: the name of the element, which must match
                _ALLOWED_NAME_PATTERN.
            **attr: the names and value of the attributes. Names must match
                _ALLOWED_NAME_PATTERN and values will be quote-escaped.
        """
        assert Element._ALLOWED_NAME_PATTERN.match(tag_name), (
            'tag name %s is not allowed' % tag_name)
        for attr_name in attr:
            assert Element._ALLOWED_NAME_PATTERN.match(attr_name), (
                'attribute name %s is not allowed' % attr_name)
        super(Element, self).__init__()
        self._tag_name = tag_name
        self._children = []
        self._attr = {}
        # Attribute names are treated case-insensitively: store lowercased.
        for _name, _value in attr.items():
            self._attr[_name.lower()] = _value

    def has_attribute(self, name):
        """Whether attribute 'name' (case-insensitive) is present."""
        return name.lower() in self._attr

    @property
    def attributes(self):
        # The lowercased attribute names currently set on this element.
        return self._attr.keys()

    def set_attribute(self, name, value):
        """Set attribute 'name' to 'value'; returns self for chaining."""
        self._attr[name.lower()] = value
        return self

    def get_escaped_attribute(self, name):
        """Return the quote-escaped value of attribute 'name'."""
        return escape(self._attr[name.lower()])

    def add_attribute(self, **attr):
        """Add or overwrite attributes given as keywords; returns self."""
        for attr_name, value in attr.items():
            assert Element._ALLOWED_NAME_PATTERN.match(attr_name), (
                'attribute name %s is not allowed' % attr_name)
            self._attr[attr_name.lower()] = value
        return self

    def add_child(self, node):
        """Append a child node and adopt it; returns self for chaining."""
        node._set_parent(self)  # pylint: disable=protected-access
        self._children.append(node)
        return self

    def append(self, node):
        # Alias for add_child, matching the NodeList interface.
        return self.add_child(node)

    def add_children(self, node_list):
        """Append every node held by the given NodeList; returns self."""
        for child in node_list.list:
            self.add_child(child)
        return self

    def empty(self):
        """Remove all children; returns self."""
        self._children = []
        return self

    def add_text(self, text):
        """Append untrusted text (escaped on render); returns self."""
        return self.add_child(Text(text))

    def can_have_children(self):
        return True

    @property
    def children(self):
        # Shallow copy, so callers cannot mutate our child list.
        return [] + self._children

    @property
    def tag_name(self):
        return self._tag_name

    @property
    def sanitized(self):
        """Sanitize the element and its descendants."""
        assert Element._ALLOWED_NAME_PATTERN.match(self._tag_name), (
            'tag name %s is not allowed' % self._tag_name)
        buff = '<' + self._tag_name
        # Render attributes in sorted order so output is deterministic.
        for attr_name, value in sorted(self._attr.items()):
            if attr_name == 'classname':
                # 'class' is a Python keyword, so callers pass className.
                attr_name = 'class'
            elif attr_name.startswith('data_'):
                # data_foo_bar renders as data-foo-bar (every underscore).
                attr_name = attr_name.replace('_', '-')
            if value is None:
                value = ''
            buff += ' %s="%s"' % (
                attr_name, escape(value))
        if self._children:
            buff += '>'
            for child in self._children:
                buff += child.sanitized
            buff += '</%s>' % self._tag_name
        elif self._tag_name.lower() in Element._VOID_ELEMENTS:
            buff += '/>'
        else:
            # Non-void empty elements still need an explicit close tag.
            buff += '></%s>' % self._tag_name
        return buff
class A(Element):
    """Embodies an 'a' tag. Just a convenience wrapper on Element."""

    def __init__(self, href, **attr):
        """Initialize an 'a' tag pointing at the given target.

        Args:
            href: The value to put in the 'href' tag of the 'a' element.
            **attr: the names and value of the attributes. Names must match
                _ALLOWED_NAME_PATTERN and values will be quote-escaped.
        """
        super(A, self).__init__('a', **attr)
        self.add_attribute(href=href)
class ScriptElement(Element):
    """Represents an HTML <script> element."""

    def __init__(self, **attr):
        super(ScriptElement, self).__init__('script', **attr)

    def can_have_children(self):
        # Script bodies are added via add_text, never as child nodes.
        return False

    def add_child(self, unused_node):
        raise ValueError()

    def add_children(self, unused_nodes):
        raise ValueError()

    def empty(self):
        raise ValueError()

    def add_text(self, text):
        """Add the script body."""

        # A Text node whose content renders verbatim (not HTML-escaped):
        # script bodies are code, not HTML text.
        class Script(Text):

            def __init__(self, script):
                # Pylint is just plain wrong about warning here; suppressing.
                # pylint: disable=bad-super-call
                super(Script, self).__init__(None)
                self._script = script

            @property
            def sanitized(self):
                # Refuse content that could prematurely close the element.
                if '</script>' in self._script:
                    raise ValueError('End script tag forbidden')
                return self._script

        self._children.append(Script(text))
class Entity(Node):
    """Holds an XML entity."""

    # Named (&amp;), decimal (&#38;) or hex (&#x26;) entity references.
    ENTITY_PATTERN = re.compile('^&([a-zA-Z]+|#[0-9]+|#x[0-9a-fA-F]+);$')

    def __init__(self, entity):
        assert Entity.ENTITY_PATTERN.match(entity)
        super(Entity, self).__init__()
        self._entity = entity

    @property
    def sanitized(self):
        # Re-validate on access in case the value was mutated externally.
        assert Entity.ENTITY_PATTERN.match(self._entity)
        return self._entity
def assemble_text_message(text, link):
    """Build a NodeList with optional text and a 'Learn more...' link.

    Args:
        text: optional plain text shown first (escaped on render).
        link: optional URL; when present, an anchor opening in a new tab
            with the text 'Learn more...' is appended.
    Returns:
        A NodeList holding the assembled nodes.
    """
    node_list = NodeList()
    if text:
        node_list.append(Text(text))
        # Bug fix: use a well-formed entity reference; a literal NBSP
        # character fails Entity.ENTITY_PATTERN's assertion.
        node_list.append(Entity('&nbsp;'))
    if link:
        node_list.append(Element(
            'a', href=link, target='_blank').add_text('Learn more...'))
    return node_list
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General utility functions common to all of CourseBuilder."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import cStringIO
import datetime
import logging
import random
import re
import string
import sys
import traceback
import zipfile
import appengine_config
from google.appengine.api import namespace_manager
# Splitter regexes: the backward-compatible form also treats square
# brackets as separators; ALPHANUM feeds generate_instance_id below.
BACKWARD_COMPATIBLE_SPLITTER = re.compile(r'[\[\] ,\t\n]+', flags=re.M)
SPLITTER = re.compile(r'[ ,\t\n]+', flags=re.M)
ALPHANUM = string.ascii_letters + string.digits


def text_to_list(text, splitter=SPLITTER):
    """Split 'text' on the given separator regex, dropping empty tokens."""
    if not text:
        return []
    return [token for token in splitter.split(text) if token]
def list_to_text(items):
    """Join items into one space-delimited unicode string ('' if empty)."""
    if not items:
        return ''
    return ' '.join(unicode(item) for item in items)
def generate_instance_id():
    """Return a random 12-character alphanumeric identifier."""
    # Not cryptographically strong; suitable for instance tagging only.
    return ''.join(random.choice(ALPHANUM) for _ in xrange(12))
def truncate(x, precision=2):
    """Truncate (not round) x toward zero to 'precision' decimal places."""
    assert isinstance(precision, int) and precision >= 0
    scale = 10 ** precision
    return int(x * scale) / float(scale)
def iter_all(query, batch_size=100):
    """Yields query results iterator. Proven method for large datasets.

    Args:
        query: a datastore query; must support with_cursor()/cursor()/run().
        batch_size: number of entities fetched per underlying batch.
    Yields:
        Each entity matched by the query, across all batches.
    """
    prev_cursor = None
    any_records = True
    while any_records:
        any_records = False
        # Resume from where the previous pass left off; the initial pass
        # uses a None cursor, i.e. the start of the result set.
        query = query.with_cursor(prev_cursor)
        for entity in query.run(batch_size=batch_size):
            any_records = True
            yield entity
        prev_cursor = query.cursor()
def run_hooks(hooks, *args, **kwargs):
    """Run all the given callback hooks.

    Args:
        hooks: iterable. The callback functions to be invoked. Each function
            is passed the remaining args and kwargs.
        *args: List of arguments passed the hook functions.
        **kwargs: Dict of keyword args passed to the hook functions.
    """
    # TODO(jorr): Add configurable try-catch around call
    for callback in hooks:
        callback(*args, **kwargs)
class Namespace(object):
    """Context manager that temporarily switches the datastore namespace.

    Intended for use in a 'with' statement.  The verbose pattern:

        old_namespace = namespace_manager.get_namespace()
        try:
            namespace_manager.set_namespace(self._namespace)
            app_specific_stuff()
        finally:
            namespace_manager.set_namespace(old_namespace)

    can be replaced with the much more terse:

        with Namespace(self._namespace):
            app_specific_stuff()

    This style can be used in classes that need to be pickled; the
    @in_namespace function annotation is arguably visually cleaner, but
    can't be used with pickling.  The acquire/release-guard style is also
    handy when only a portion of a function needs to run in a namespaced
    context.
    """

    def __init__(self, new_namespace):
        self.new_namespace = new_namespace

    def __enter__(self):
        # Remember the caller's namespace so __exit__ can restore it.
        self.old_namespace = namespace_manager.get_namespace()
        namespace_manager.set_namespace(self.new_namespace)
        return self

    def __exit__(self, *unused_exception_info):
        namespace_manager.set_namespace(self.old_namespace)
        return False  # Don't suppress exceptions
def log_exception_origin():
    """Log the traceback of the origin of an exception as a critical error.

    When in a try/except block, logging often discards the traceback of
    the origin of the thrown exception.  This function captures the
    traceback at the point of exception and emits it via the standard
    logging library as a critical message.  This is a common idiom with
    verbose boilerplate, so it is factored here into one function.
    """
    # Per the Python docs' recommendation, avoid keeping a long-lived
    # reference to the traceback object (circular reference); the
    # try/finally keeps its lifetime scoped to this call.
    try:
        origin_traceback = sys.exc_info()[2]
        logging.critical(''.join(traceback.format_tb(origin_traceback)))
    finally:
        pass
def find(predicate, iterable, default=None):
    """Return the first item in 'iterable' matching 'predicate'.

    This is a more-usable alternative to filter(), in that it does not
    raise an exception if no item is found.

    Args:
        predicate: A function taking one argument: an item from the iterable.
        iterable: A list or generator providing items passed to "predicate".
        default: Value to return if no item is found.
    Returns:
        The first item for which "predicate" returns True, else 'default'.
    """
    return next((item for item in iterable if predicate(item)), default)
class ZipAwareOpen(object):
    """Provide open() services for third party libraries in .zip files.

    Some libraries that are commonly downloaded and pushed alongside
    CourseBuilder are shipped with data files.  These libraries make the
    assumption that when shipped in a product, they are packaged as plain
    files in a normal directory hierarchy.  Thus, when that library is
    actually packaged in a .zip file, the open() call will fail.  This
    class provides a convenient syntax around functionality that wraps
    calls to the builtin open() (or in the case of AppEngine, the version
    of 'open()' that AppEngine itself provides).  When an attempt is made
    to open a file that is actually packaged within a .zip file, this
    wrapper will intelligently look within the .zip file for that member.

    Only read access is supported.
    No recursion into .zip files within other .zip files is supported.

    Example:
        with common_utils.ZipAwareOpen():
            third_party_module.some_func_that_calls_open()
    """

    # Maps on-disk .zip path -> logical root of the library's contents
    # within that .zip (from appengine_config).
    THIRD_PARTY_LIB_PATHS = {
        l.file_path: l.full_path for l in appengine_config.THIRD_PARTY_LIBS}

    def zip_aware_open(self, name, *args, **kwargs):
        """Override open() iff opening a file in a library .zip for reading."""
        # First cut: Don't even consider checking .zip files unless the
        # open is for read-only and ".zip" is in the filename.
        mode = args[0] if args else kwargs['mode'] if 'mode' in kwargs else 'r'
        if '.zip' in name and (not mode or mode == 'r' or mode == 'rb'):
            # Only consider .zip files known in the third-party libraries
            # registered in appengine_config.py
            for path in ZipAwareOpen.THIRD_PARTY_LIB_PATHS:
                # Don't use zip-open if the file we are looking for _is_
                # the sought .zip file.  (We are recursed into from the
                # zipfile module when it needs to open a file.)
                if path in name and path != name:
                    zf = zipfile.ZipFile(path, 'r')
                    # Possibly extend simple path to .zip file with relative
                    # path inside .zip file to meaningful contents.
                    name = name.replace(
                        path, ZipAwareOpen.THIRD_PARTY_LIB_PATHS[path])
                    # Strip off on-disk path to .zip file.  This leaves
                    # us with the absolute path within the .zip file.
                    name = name.replace(path, '').lstrip('/')
                    # Return a file-like object containing the data extracted
                    # from the .zip file for the given name.
                    data = zf.read(name)
                    return cStringIO.StringIO(data)
        # All other cases pass through to builtin open().
        return self._real_open(name, *args, **kwargs)

    def __enter__(self):
        """Wrap Python's internal open() with our version."""
        # No particular reason to use __builtins__ in the 'zipfile' module;
        # the set of builtins is shared among all modules implemented in
        # Python.
        self._real_open = sys.modules['zipfile'].__builtins__['open']
        sys.modules['zipfile'].__builtins__['open'] = self.zip_aware_open

    def __exit__(self, *unused_exception_info):
        """Reset open() to be the Python internal version."""
        sys.modules['zipfile'].__builtins__['open'] = self._real_open
        return False  # Don't suppress exceptions.
def parse_timedelta_string(timedelta_string):
    """Parses a human-friendly duration string into a datetime.timedelta.

    Accepts optional, comma- or space-separated components such as
    '2 weeks, 3 days', '1w', '90 seconds', or '1 hour, 30 minutes'.
    Components may be abbreviated to their first letter; missing
    components count as zero, so an empty string yields a zero delta.
    """
    units = ['weeks', 'days', 'hours', 'minutes', 'seconds']
    # Each unit matches e.g. '3', optional whitespace, then 'd', 'day',
    # 'days', etc.; the whole group is optional.
    unit_patterns = [
        r'((?P<%s>\d+)\s*%s(%s)?s?)?' % (unit, unit[0], unit[1:-1])
        for unit in units]
    pattern = r'\s*,?\s*'.join(unit_patterns)
    match = re.match(pattern, timedelta_string)
    # Unmatched groups default to '0', so every unit is always present.
    return datetime.timedelta(**{
        name: int(text)
        for name, text in match.groupdict(default='0').items()})
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encryption and digest functionality."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import base64
import hashlib
import hmac
import os
import random
import time
import appengine_config
from common import utils
from models import config
from google.appengine.api import users
# PyCrypto may be absent in local development environments; fall back to a
# trivial, deliberately-insecure stand-in so the dev server can still run.
# In production the ImportError is re-raised instead.
try:
    from Crypto.Cipher import AES
except ImportError:
    if appengine_config.PRODUCTION_MODE:
        raise
    class AES(object):
        """No-op crypto class to permit running on MacOS in dev mode."""
        # Mirrors Crypto.Cipher.AES.MODE_CBC so call sites need no changes.
        MODE_CBC = 2
        @staticmethod
        def new(unused_1, unused_2, unused_3):
            # Same arity as AES.new(key, mode, IV); all arguments ignored.
            return AES()
        def __init__(self):
            pass
        def _reverse(self, message):
            # "Encrypt" by reversing.  Just want to ensure that the encrypted
            # version differs from the plaintext so that nothing accidentally
            # relies on being able to read the nominally-encrypted value.
            m_list = list(message)
            m_list.reverse()
            return ''.join(m_list)
        def encrypt(self, message):
            return self._reverse(message)
        def decrypt(self, message):
            return self._reverse(message)
# Length, in characters, of the auto-generated XSRF signing secret.
XSRF_SECRET_LENGTH = 20
XSRF_SECRET = config.ConfigProperty(
    'gcb_xsrf_secret', str, (
        'Text used to encrypt tokens, which help prevent Cross-site request '
        'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
        'preferably using 16-64 characters. Once you change this value, the '
        'server rejects all subsequent requests issued using an old value for '
        'this variable.'),
    'course builder XSRF secret')
# Length, in characters, of the AES secret: a 32-character key plus a
# 16-character initialization vector (see EncryptionManager._build_crypto).
ENCRYPTION_SECRET_LENGTH = 48
ENCRYPTION_SECRET = config.ConfigProperty(
    'gcb_encryption_secret', str, (
        'Text used to encrypt messages. You can set this to any text at all, '
        'but the value must be exactly ' + str(ENCRYPTION_SECRET_LENGTH) +
        ' characters long. If you change this value, the server will be '
        'unable to understand items encrypted under the old key.'),
    'default value of CourseBuilder encryption secret',
    validator=config.ValidateLength(ENCRYPTION_SECRET_LENGTH).validator)
class EncryptionManager(object):
    """Manages the secrets used for HMAC digests and AES encryption."""

    @classmethod
    def _init_secret_if_none(cls, cfg, length):
        """Initializes a secret config property to a random value if unset.

        Args:
            cfg: config.ConfigProperty holding the secret.
            length: int.  Desired length, in characters, of the secret.
        """
        # Any non-default value is fine.
        if cfg.value and cfg.value != cfg.default_value:
            return
        # All property manipulations must run in the default namespace.
        with utils.Namespace(appengine_config.DEFAULT_NAMESPACE_NAME):
            # Look in the datastore directly.
            entity = config.ConfigPropertyEntity.get_by_key_name(cfg.name)
            if not entity:
                entity = config.ConfigPropertyEntity(key_name=cfg.name)
            # Any non-default non-None value is fine.
            if (entity.value and not entity.is_draft and
                (str(entity.value) != str(cfg.default_value))):
                return
            # Initialize to random value.  Base64 expands 3 input bytes to 4
            # output characters, so length * 0.75 random bytes encode to
            # exactly 'length' characters.
            entity.value = base64.urlsafe_b64encode(
                os.urandom(int(length * 0.75)))
            entity.is_draft = False
            entity.put()

    @classmethod
    def _get_hmac_secret(cls):
        """Verifies that non-default XSRF secret exists; creates one if not."""
        cls._init_secret_if_none(XSRF_SECRET, XSRF_SECRET_LENGTH)
        return XSRF_SECRET.value

    @classmethod
    def _get_encryption_secret(cls):
        """Verifies non-default encryption secret exists; creates one if not."""
        cls._init_secret_if_none(ENCRYPTION_SECRET, ENCRYPTION_SECRET_LENGTH)
        return ENCRYPTION_SECRET.value

    @classmethod
    def hmac(cls, components):
        """Generates an HMAC digest over a sequence of component strings."""
        secret = cls._get_hmac_secret()
        digester = hmac.new(str(secret))
        for component in components:
            digester.update(component)
        return digester.digest()

    @classmethod
    def _build_crypto(cls, secret):
        """Builds an AES-CBC cipher; key is first 32 chars, IV is last 16."""
        # Fix: use the module constant rather than a hard-coded 48 so this
        # check stays consistent with ENCRYPTION_SECRET's length validator.
        if len(secret) != ENCRYPTION_SECRET_LENGTH:
            raise ValueError(
                'Encryption secret must be exactly %d characters' %
                ENCRYPTION_SECRET_LENGTH)
        return AES.new(secret[:32], AES.MODE_CBC, secret[32:])

    @classmethod
    def encrypt(cls, message, secret=None):
        """Encrypt a message.  Message value returned is not URL-safe."""
        message = message or ''
        # Prefix with the plaintext length so decrypt() can strip padding.
        message = '%d.%s' % (len(message), message)
        # Pad up to AES's 16-byte block size.
        message += '^' * (16 - len(message) % 16)
        secret = secret or cls._get_encryption_secret()
        return cls._build_crypto(secret).encrypt(message)

    @classmethod
    def encrypt_to_urlsafe_ciphertext(cls, message, secret=None):
        """Convenience wrapper to get URL-safe version of encrypted data."""
        return base64.urlsafe_b64encode(cls.encrypt(message, secret))

    @classmethod
    def decrypt(cls, message, secret=None):
        """Decrypt a message, returning the original plaintext."""
        secret = secret or cls._get_encryption_secret()
        crypto = cls._build_crypto(secret)
        message = crypto.decrypt(message)
        # The plaintext length precedes the first '.'; everything beyond
        # that length is padding added by encrypt().
        delim_index = message.find('.')
        original_length = int(message[:delim_index])
        return message[delim_index + 1:delim_index + 1 + original_length]

    @classmethod
    def decrypt_from_urlsafe_ciphertext(cls, message, secret=None):
        """Convenience wrapper decoding base64 before decrypting."""
        return cls.decrypt(base64.urlsafe_b64decode(message), secret)
class XsrfTokenManager(object):
    """Provides XSRF protection via stateless HMAC-signed action tokens.

    NOTE(review): an earlier docstring said tokens are "managed in
    memcache"; the implementation visible here is stateless -- tokens are
    recomputed from their inputs and compared, never stored or looked up.
    """
    # Max age of the token (4 hours).
    XSRF_TOKEN_AGE_SECS = 60 * 60 * 4
    # Token delimiters.
    DELIMITER_PRIVATE = ':'
    DELIMITER_PUBLIC = '/'
    # Default user id used when there is no signed-in user.
    USER_ID_DEFAULT = 'default'
    @classmethod
    def _create_token(cls, action_id, issued_on):
        """Creates a string representation (digest) of a token.

        The token is '<issued_on>/<urlsafe-b64 HMAC>' where the HMAC
        covers 'user_id:action_id:issued_on'.  Keeping the issue time in
        plain text lets validation recompute the digest later.
        """
        # Lookup user id.
        user = users.get_current_user()
        if user:
            user_id = user.user_id()
        else:
            user_id = cls.USER_ID_DEFAULT
        # Round time to seconds.
        issued_on = long(issued_on)
        # NOTE: EncryptionManager.hmac() iterates its argument; passing the
        # joined string feeds it character by character, which produces the
        # same digest as a single update with the whole string.
        digest = EncryptionManager.hmac(
            cls.DELIMITER_PRIVATE.join([
                str(user_id), str(action_id), str(issued_on)]))
        token = '%s%s%s' % (
            issued_on, cls.DELIMITER_PUBLIC, base64.urlsafe_b64encode(digest))
        return token
    @classmethod
    def create_xsrf_token(cls, action):
        """Creates a token for 'action' issued at the current time."""
        return cls._create_token(action, time.time())
    @classmethod
    def is_xsrf_token_valid(cls, token, action):
        """Validates a token by recomputing it and comparing for equality."""
        try:
            parts = token.split(cls.DELIMITER_PUBLIC)
            if len(parts) != 2:
                return False
            # Reject tokens older than XSRF_TOKEN_AGE_SECS.
            issued_on = long(parts[0])
            age = time.time() - issued_on
            if age > cls.XSRF_TOKEN_AGE_SECS:
                return False
            # Recompute the expected token for this action and issue time;
            # any tampering with either part changes the digest.
            authentic_token = cls._create_token(action, issued_on)
            if authentic_token == token:
                return True
            return False
        except Exception:  # pylint: disable=broad-except
            # Malformed tokens (bad int, wrong encoding) are simply invalid.
            return False
def get_external_user_id(app_id, namespace, email):
    """Gets an id for a user that can be transmitted to external systems.

    The returned key is scoped to a particular user within a particular
    course on a particular Course Builder deployment, and is guaranteed
    to be statistically unique within that scope.

    Args:
        app_id: string. Application ID of the CB App Engine deployment.
        namespace: string. Namespace of a single course. May be the empty
            string.
        email: string. Unvalidated email address for a user.

    Returns:
        String: a hex-encoded SHA-256 HMAC digest.
    """
    scoped_key = '%s%s%s' % (app_id, namespace, email)
    digester = hmac.new(scoped_key, digestmod=hashlib.sha256)
    return digester.hexdigest()
def hmac_sha_2_256_transform(privacy_secret, value):
    """HMAC-SHA-2-256 for use as a privacy transformation function.

    Both the secret and the value are coerced to str before hashing; the
    result is a hex-encoded digest.
    """
    digester = hmac.new(
        str(privacy_secret), msg=str(value), digestmod=hashlib.sha256)
    return digester.hexdigest()
def generate_transform_secret_from_xsrf_token(xsrf_token, action):
    """Deterministically generate a secret from an XSRF 'nonce'.

    When multiple data sources are being accessed via the REST API,
    consumers may need to correlate data across the different sources.
    To take a particular example, the analytics page on the dashboard is
    one such consumer.  This function provides a convenient way to turn
    an opaque, non-forgeable XSRF token internally into an HMAC secret.

    The main point here is that the secret string used for HMAC'ing the
    PII in the data source outputs is
    - Not derived from anything the user may generate, so the user cannot
      manipulate the seed value to experiment to find weaknesses.
    - Not predictable given the information the user has.  (The user does
      not have the encryption key.)  The encryption key is used in
      preference to using the HMAC key twice.

    Args:
        xsrf_token: An XSRF token encoded as usual for use as an
            HTML parameter.
        action: Action expected to be present in the token.

    Returns:
        None if the XSRF token is invalid, or an encryption key if it is.
    """
    if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action):
        return None
    # Encrypt the publicly-visible xsrf parameter with our private
    # encryption secret.  The result is entirely deterministic, yet not
    # generatable by anyone not in possession of the encryption secret.
    seed_string = EncryptionManager.encrypt(xsrf_token)
    # Fold the encrypted bytes into one big-endian integer seed.
    seed = 0
    for ch in seed_string:
        seed = seed * 256 + ord(ch)
    rng = random.Random(seed)
    # Use the seeded PRNG to deterministically generate a secret which is
    # identical for identical values of the XSRF token.
    num_bytes = int(ENCRYPTION_SECRET_LENGTH * 0.75)
    raw_secret = ''.join(chr(rng.getrandbits(8)) for _ in range(num_bytes))
    return base64.urlsafe_b64encode(raw_secret)
| Python |
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HTML content transformation and manipulation functions.
About
This module performs complex HTML document transformations, which enable
machine-assisted internationalization (I18N) of content.
Extracting a resource bundle from your HTML content
This is done using extract_resource_bundle_from() function. Here is what
happens behind the scenes.
HTML content is received as text and parsed into an XML ElementTree tree with
html5lib library. ElementTree is then converted into safe_dom tree. Already
parsed tree can be provided as well.
The HTML tags in the tree are inspected to extract all contiguous text chunks.
For example: content of <p>...</p> tag is extracted as one chunk, with a
simple markup (like <a>, <b>, <i> and <br>) left inline.
Text chunks are returned as a list of strings. Each string contains plain
text and an inline markup. The markup uses slightly modified original tag
names with the unique index appended and with most attributes removed. For
example:
'<a href="foo" title="Bar">Foo</a><br/><b>Bar</b>'
becomes
'<a#1 title="Bar">Foo</a#1><br#2 /><b#3>Bar</b#3>'
The list of strings, which we will call a 'resource bundle', is ready to be
sent to translator, who must translate both plain text and the text between
inline markup. Reorder of marked up terms is allowed.
When I said 'plain text', I lied a little bit. The strings are expected to
a) be HTML entity encoded and b) be of unicode type in Python. Each of the
strings will be parsed using minidom XML parser. The translator must take care
of the entity encoding, and you as a developer must take care of using proper
charsets in the user interface given to the translator. During the XML parsing
phase UTF-8 is used internally.
Putting translations into your HTML content
This is done using merge_resource_bundle_into() function. Here is what
happens behind the scenes.
The list of strings is received along with the HTML content or an safe_dom
tree of the content to be inserted into. The content is processed as described
above and both the strings and the markup in the original language are
removed.
New strings are inserted one by one into the proper places of the content tree
and inline markup is expanded to have the proper original tags names and the
attributes. The values of attributes like 'alt' and 'title' can be provided in
the translations, other attributes specified in the translations are ignored.
No attempt is made to make sure new strings correspond to the original
strings. Whatever strings are given, those are the ones we will try to weave
into the content. Thus, when the original content changes, it's your
responsibility to diff the resource bundles before and after the edit, send
the delta to translator and compose new updated resource bundle.
The final safe_dom tree with the translations put in is returned. You have
many options for rendering it out, including using the provided
ContentIO.tostring() function.
Common issues
Where is my whitespace?
Whitespace inside and around translation strings is removed intentionally.
Why do I see 'UnicodeDecodeError: 'ascii' codec can't decode byte...'?
You most likely forgot the letter 'u' in front of your Python unicode
string.
Resource String Disambiguation
One may encounter two strings that have exact same text in English, but have
to be translated differently due to the context of their use. Simply add a
comment just before the text to be translated. The comment must start with
the 'I18N:' prefix, otherwise it is not shown. For example, here is a valid
I18N comment: '<!-- I18N: "Android" means "a robot" in this context -->'.
Open Issues:
- P0: complete map_source_to_target() for allow_list_reorder=True
- P0: move all schemas out of dashboard into models; leave UX specific
inputEx annotations of those schemas in dashboard
- P0: clean up and streamline Registry/SchemaFields
- P0: update about herein with details of object bind/map/diff
- P0: get rid of minidom, use cElementTree to reduce parser dependency
- P0: drop '#' and allow <a> and <b> while no disambiguation is required
- P0: how shall safedom handle custom tag nodes that are not yet ready to
be expanded; as proxy nodes?
Good luck!
"""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import difflib
import htmlentitydefs
import re
import StringIO
import sys
import unittest
from xml.dom import minidom
import safe_dom
from tags import html_to_safe_dom
# Character encoding used internally during the XML parsing phase.
XML_ENCODING = 'utf-8'
# Comments having this prefix will be extracted into the resource bundle; for
# example: <!-- I18N: 'Android' here means 'robot', not 'operating system' -->
I18N_COMMENT_PREFIX = 'I18N:'
# These tags are rendered inline if they don't have any children;
# for example: <a#1>Foo!</a#1>
DEFAULT_INLINE_TAG_NAMES = [
    'A', 'B', 'I', 'SPAN', 'BR', 'STRONG', 'EM', 'SMALL']
# These tags are not inspected and are rendered inline without any content;
# for example: <script#1 />
DEFAULT_OPAQUE_TAG_NAMES = ['SCRIPT', 'STYLE']
# These tags are inspected and are rendered inline without any content;
# for example: <ul#1 />; their children are extracted for translation as
# independent items
DEFAULT_OPAQUE_DECOMPOSABLE_TAG_NAMES = [
    'UL', 'TABLE', 'IMG', 'INPUT', 'TEXTAREA']
# The key is an attribute name. The value is a set of tag names, for which
# this attribute can be recomposed from resource bundle. All other attributes
# are not recomposable.  '*' means "any tag".
DEFAULT_RECOMPOSABLE_ATTRIBUTES_MAP = {
    'ALT': set(['*']), 'TITLE': set(['*']), 'SRC': set(['IMG']),
    'PLACEHOLDER': set(['INPUT', 'TEXTAREA'])}
# Regex that matches named HTML entities: an '&', one or more lowercase
# letters, then a ';'.
_ENTITY_REGEX = re.compile('(&[a-z]+;)')
# Items we don't want to change to codes when translating HTML entities in XML.
_XML_ENTITY_NAMES = frozenset(['quot', 'amp', 'lt', 'gt', 'apos'])
# pylint: disable=protected-access
def _get_entity_map():
    """Builds the entity-name to character-reference translation table."""
    html_and_xml_entities = dict(htmlentitydefs.entitydefs)
    # Python's table is missing 'apos', which is part of XML; the value is
    # irrelevant because XML-native names keep their named form below.
    html_and_xml_entities['apos'] = None
    mappings = {}
    for name, code in html_and_xml_entities.iteritems():
        if name in _XML_ENTITY_NAMES:
            # XML understands these natively, so keep the named reference.
            mappings[name] = '&%s;' % name
        elif code.startswith('&'):
            # Already a numeric reference; use it unchanged.
            mappings[name] = code
        else:
            # A raw character: standardize on a numeric reference for
            # minidom compatibility.
            mappings[name] = '&#%s;' % str(ord(code))
    return mappings
# Map of HTML entity name string ('copy') to its numeric character reference
# string ('&#169;').  IMPORTANT: some entities are known to both HTML and XML
# (see _XML_ENTITY_NAMES).  In that case, we do not translate to a code
# because we're processing XML.  For those items, the value is the named
# reference (for example, for the key 'quot' the value is '&quot;').
_ENTITY_MAP = _get_entity_map()
class ContentIO(object):
    """Class that knows how to load and save HTML content to be translated."""
    @classmethod
    def _is_simple_text_content(cls, node):
        """Checks if node only has children of type Text."""
        simple_text_content = True
        children = cls._get_children(node)
        for child in children:
            if not isinstance(child, safe_dom.Text):
                simple_text_content = False
                break
        return simple_text_content
    @classmethod
    def _get_children(cls, node_list_or_element):
        """Returns the child list of either a NodeList or an Element."""
        if isinstance(node_list_or_element, safe_dom.NodeList):
            return node_list_or_element.list
        if isinstance(node_list_or_element, safe_dom.Element):
            return node_list_or_element.children
        raise TypeError(
            'Expected NodeList/Element, found: %s.' % node_list_or_element)
    @classmethod
    def _merge_node_lists(cls, node_list, node):
        """Combines sibling or nested node lists into one.

        Dispatches on node type: a NodeList is flattened into node_list; an
        Element is wrapped in a temporary NodeList first so its interior
        gets the same treatment; any other node is appended as-is.
        """
        if isinstance(node, safe_dom.NodeList):
            cls._merge_node_list_chidren(node_list, node)
        elif isinstance(node, safe_dom.Element):
            root_node_list = safe_dom.NodeList()
            root_node_list.append(node)
            cls._merge_node_list_chidren(node_list, root_node_list)
        else:
            node_list.append(node)
    @classmethod
    def _merge_node_list_chidren(cls, target_node_list, node_list):
        """Inspects NodeList and merges its contents recursively."""
        # Copy before emptying; children are re-parented into the target.
        _children = [] + node_list.children
        node_list.empty()
        for child in _children:
            if isinstance(child, safe_dom.NodeList):
                # Nested list: flatten its children into the target.
                cls._merge_node_list_chidren(target_node_list, child)
            else:
                target_node_list.append(child)
                if isinstance(child, safe_dom.Element):
                    # Elements may themselves contain nested NodeLists.
                    cls._merge_element_chidren(child)
    @classmethod
    def _merge_element_chidren(cls, element):
        """Inspects Element and merges its contents recursively."""
        if not element.can_have_children():
            return
        # Detach all children, then re-append them with each run of
        # adjacent NodeLists coalesced into a single NodeList.
        _children = [] + element.children
        element.empty()
        _last_node_list_child = None
        for child in _children:
            if isinstance(child, safe_dom.NodeList):
                if _last_node_list_child is None:
                    # First NodeList of a run: start an accumulator and
                    # attach it to the element only if it ends up non-empty.
                    _last_node_list_child = safe_dom.NodeList()
                    cls._merge_node_list_chidren(_last_node_list_child, child)
                    if _last_node_list_child:
                        element.append(_last_node_list_child)
                    else:
                        _last_node_list_child = None
                else:
                    # Subsequent NodeList of the same run: merge into the
                    # accumulator already attached to the element.
                    cls._merge_node_list_chidren(_last_node_list_child, child)
            else:
                # Any non-NodeList child ends the current run.
                _last_node_list_child = None
                element.append(child)
                if isinstance(child, safe_dom.Element):
                    cls._merge_element_chidren(child)
    @classmethod
    def _normalize_tree(cls, tree):
        """Combines sibling or nested node lists into one."""
        node_list = safe_dom.NodeList()
        cls._merge_node_lists(node_list, tree)
        return node_list
    @classmethod
    def fromstring(cls, content):
        """Converts HTML string content into an XML tree."""
        return (
            html_to_safe_dom(unicode(content), None, render_custom_tags=False))
    @classmethod
    def tostring(cls, tree):
        """Renders tree as HTML text."""
        # safe_dom nodes expose a sanitized HTML serialization.
        return tree.sanitized
class TranslationIO(object):
    """Class that knows how to load and save XML translations."""
    @classmethod
    def _is_indexable(cls, node):
        """Checks if node can have an index of style of <a#1 />."""
        # Only real elements get indexes; text and comments do not.
        return not (isinstance(node, safe_dom.Text) or isinstance(
            node, safe_dom.Comment))
    @classmethod
    def _is_ancestor(cls, descendant, ancestor):
        """Checks whether ancestor contains (or is) descendant."""
        if descendant == ancestor or descendant.parent == ancestor:
            return True
        if not descendant.parent:
            return False
        return cls._is_ancestor(descendant.parent, ancestor)
    @classmethod
    def _set_children(cls, node, children):
        """Replaces the child list of a NodeList or Element wholesale."""
        if isinstance(node, safe_dom.NodeList):
            node.list = children
        elif isinstance(node, safe_dom.Element):
            node._children = children
        else:
            raise TypeError('Unsupported node type: %s.' % node)
    @classmethod
    def _copy_node_content_from_minidom_to_safe_dom(
        cls, source_node, target_element):
        """Copies child nodes from source to target.

        Only text and comment children are supported; anything else raises.
        """
        if not source_node.childNodes:
            return
        target_element._children = []
        for node in source_node.childNodes:
            if node.nodeType == minidom.Node.TEXT_NODE:
                target_element.add_child(safe_dom.Text(node.nodeValue))
                continue
            if node.nodeType == minidom.Node.COMMENT_NODE:
                target_element.add_child(safe_dom.Comment(node.nodeValue))
                continue
            raise TypeError('Unknown node type: %s.' % node)
    @classmethod
    def _find_replace_for_tag_open(cls, source_delimiter, target_delimiter):
        """Returns regex pattern for replacing delimiter in the open tag."""
        return (
            r'<([a-zA-Z0-9_\-]+)%s([0-9]+)' % source_delimiter,
            '<\\1%s\\2' % target_delimiter)
    @classmethod
    def _find_replace_for_tag_close(cls, source_delimiter, target_delimiter):
        """Returns regex pattern for replacing delimiter in the closing tag."""
        return (
            r'</([a-zA-Z0-9_\-]+)%s([0-9]+)>' % source_delimiter,
            '</\\1%s\\2>' % target_delimiter)
    @classmethod
    def _apply_regex(cls, find_replace, content):
        """Applies a (find_pattern, replacement) pair to content."""
        _find, _replace = find_replace
        return re.sub(_find, _replace, content)
    @classmethod
    def remove_whitespace(cls, content):
        """Removes whitespace from translation string."""
        _content = content
        _content = re.sub(r'[\r\n]+', ' ', _content)
        _content = re.sub(r'\s\s+', ' ', _content)
        return _content.strip()
    @classmethod
    def _decode_tag_names(cls, content):
        """Decode all tags from 'tag#index' into 'tag-index' style names."""
        # '#' is not legal in XML tag names, so tags are renamed to use '-'
        # before parsing and renamed back after serialization.
        return cls._apply_regex(
            cls._find_replace_for_tag_open('#', '-'), cls._apply_regex(
                cls._find_replace_for_tag_close('#', '-'), content))
    @classmethod
    def _encode_tag_names(cls, content):
        """Encode all tags from 'tag-index' into 'tag#index' style names."""
        return cls._apply_regex(
            cls._find_replace_for_tag_open('-', '#'), cls._apply_regex(
                cls._find_replace_for_tag_close('-', '#'), content))
    @classmethod
    def _element_to_translation(cls, config, context, element):
        """Converts safe_dom Element into a resource bundle string.

        Returns:
            Tuple of (has_content, text); has_content is truthy when the
            text contains anything worth sending to a translator.
        """
        lines = []
        index = context.index.get_node_index_in_collation(element)
        assert index
        tag_name = '%s#%s' % (element.tag_name.lower(), index)
        start_tag = tag_name
        _attributes = element.attributes
        if config.sort_attributes:
            _attributes = sorted(_attributes)
        for attr in _attributes:
            # Only attributes allowed for this tag (or for '*') are carried
            # into the translatable string.
            tag_name_set = config.recomposable_attributes_map.get(attr.upper())
            if tag_name_set and (
                element.tag_name.upper() in tag_name_set
                or '*' in tag_name_set
            ):
                start_tag += ' %s="%s"' % (
                    attr, element.get_escaped_attribute(attr))
        if element.tag_name.upper() in config.opaque_tag_names:
            # Opaque tags render as empty inline markers; their content is
            # never shown to the translator.
            return False, '<%s />' % start_tag
        if element.tag_name.upper() in config.opaque_decomposable_tag_names:
            content = None
            if element.tag_name.upper() in config.inline_tag_names:
                content = []
                if element.children:
                    for child in element.children:
                        if not isinstance(child, safe_dom.Text):
                            raise TypeError(
                                'Unsupported node type: %s.' % child)
                        value = child.sanitized
                        content.append(value)
                if content:
                    content = ''.join(content)
                else:
                    content = None
            has_content = content or not config.omit_empty_opaque_decomposable
            if content:
                return has_content, '<%s>%s</%s>' % (
                    start_tag, content, tag_name)
            else:
                return has_content, '<%s />' % start_tag
        has_content = False
        if element.children:
            lines.append('<%s>' % start_tag)
            for child in element.children:
                if not isinstance(child, safe_dom.Text):
                    raise TypeError('Unsupported node type: %s.' % child)
                value = child.sanitized
                if value.strip():
                    has_content = True
                lines.append(value)
            lines.append('</%s>' % tag_name)
        else:
            lines.append('<%s />' % start_tag)
        return has_content, ''.join(lines)
    @classmethod
    def _collation_to_translation(cls, config, context, collation):
        """Converts a list of safe_dom nodes into a resource bundle string.

        Returns:
            The combined string, or None if nothing translatable was found.
        """
        lines = []
        has_content = False
        for node in collation:
            if isinstance(
                node, safe_dom.Comment) or isinstance(node, safe_dom.Text):
                value = node.sanitized
                if value.strip():
                    has_content = True
                lines.append(value)
                continue
            if isinstance(node, safe_dom.Element):
                _has_content, _value = cls._element_to_translation(
                    config, context, node)
                if _has_content:
                    has_content = True
                lines.append(_value)
                continue
            raise TypeError('Unsupported node type: %s.' % node)
        if not has_content:
            return None
        return ''.join(lines)
    def new_tree(self):
        """Creates new empty tree."""
        # NOTE(review): this is an instance method among classmethods;
        # presumably callers instantiate TranslationIO -- confirm.
        return minidom.Document()
    @classmethod
    def parse_indexed_tag_name(cls, node):
        """Splits a 'tag-index' minidom tag name into (tag_name, index)."""
        try:
            # Split off the last component after a '-'. (Note that custom tags
            # may contain '-' in their tag names.)
            parts = node.tagName.split('-')
            index = parts.pop()
            tag_name = '-'.join(parts)
            return tag_name, int(index)
        # NOTE(review): bare except converts any failure, even an unrelated
        # one, into a SyntaxError; 'form' in the message below looks like a
        # typo for 'from' (runtime string, left unchanged here).
        except:
            raise SyntaxError(
                'Error extracting index form the tag <%s>. '
                'Tag name format is <tag_name#index>, '
                'like <a#1>.' % node.tagName)
    @classmethod
    def extract_line_column_from_parse_error(cls, error):
        """Try to extract line, column from the text of parsing error."""
        try:
            msg = error.message
            match = re.match(r'.*\: line ([0-9]+), column ([0-9]+).*', msg)
            if match is not None:
                return int(match.group(1)), int(match.group(2))
        except:  # pylint: disable=bare-except
            pass
        # (None, None) signals that the position could not be determined.
        return None, None
    @classmethod
    def get_text_fragment(cls, text, line_num, col_num, clip_len=16):
        """Makes a clip_len long excerpt of the text using line and column.

        Args:
            text: text to make a fragment of
            line_num: one-based line number of excerpt start
            col_num: one-based column number of excerpt start
            clip_len: number of characters to leave on both sides of the
                start position

        Returns:
            The clipped text fragment, or the entire text if clipping
            failed.
        """
        assert clip_len > 0
        lines = text.split('\n')
        if (line_num is not None
            and col_num is not None
            and line_num > 0
            and line_num <= len(lines)):
            line = lines[line_num - 1]
            if col_num < 0 or col_num >= len(line):
                return text
            from_col_num = max(col_num - clip_len, 0)
            to_col_num = min(col_num + clip_len, len(line))
            result = ''
            if from_col_num < col_num:
                result += line[from_col_num:col_num]
            # Bracket the character at the error position for visibility.
            result += '[%s]' % line[col_num]
            if to_col_num > col_num:
                result += line[col_num + 1:to_col_num]
            return result
        return text
    @classmethod
    def fromstring(cls, content):
        """Converts XML string content of the translation into an XML tree."""
        # Replace named HTML entities with numeric references so the plain
        # XML parser can handle them.
        translated_entities = _ENTITY_REGEX.sub(cls._match_to_code, content)
        xml_text = '<div>%s</div>' % cls._decode_tag_names(
            translated_entities).encode(XML_ENCODING)
        try:
            tree = minidom.parseString(xml_text)
        except Exception as e:  # pylint: disable=broad-except
            # Re-raise with an excerpt of the offending text for context.
            line_num, col_num = cls.extract_line_column_from_parse_error(e)
            raise Exception(
                e.message, cls.get_text_fragment(xml_text, line_num, col_num))
        return tree
    @classmethod
    def _match_to_code(cls, match):
        """Maps a matched '&name;' entity onto its _ENTITY_MAP replacement."""
        return _ENTITY_MAP[match.group()[1:-1]]
    @classmethod
    def toxml(cls, tree):
        """Renders tree as XML text without XML declaration and root node."""
        # fromstring() wraps everything in a synthetic <div> root.
        assert 'DIV' == tree.documentElement.tagName.upper()
        data = StringIO.StringIO()
        for child in tree.documentElement.childNodes:
            child.writexml(data)
        return data.getvalue()
    @classmethod
    def tocollation(cls, tree):
        """Converts a tree into a list of nodes no more than one level deep."""
        collation = []
        for node in tree.documentElement.childNodes:
            if node.nodeType == minidom.Node.TEXT_NODE:
                collation.append(node)
                continue
            if node.nodeType == minidom.Node.COMMENT_NODE:
                collation.append(node)
                continue
            if node.nodeType == minidom.Node.ELEMENT_NODE:
                # Elements may only contain text and comment children here.
                for child in node.childNodes:
                    if child.nodeType not in [
                        minidom.Node.TEXT_NODE, minidom.Node.COMMENT_NODE]:
                        raise TypeError(
                            'Unsupported node child type: %s.' % child.nodeType)
                collation.append(node)
                continue
            raise TypeError('Unsupported node type: %s.' % node.nodeType)
        return collation
    @classmethod
    def get_indexed_tag_name(cls, node, index):
        """Returns the 'tag#index' display name for a safe_dom node."""
        return '%s#%s' % (node.tag_name.lower(), index)
    @classmethod
    def tostring(cls, tree):
        """Renders tree as a string with <a#1 /> style markup."""
        return cls._encode_tag_names(cls.toxml(tree))
class ResourceBundleItemError(Exception):
    """An error related to a specific string in a resource bundle."""
    def __init__(self, exc_info, original_exception, index):
        """Wraps original_exception, recording the failing chunk's index.

        Args:
            exc_info: sys.exc_info() triple captured where the error
                occurred; used by reraise() to preserve the traceback.
            original_exception: the underlying exception being wrapped.
            index: zero-based index of the offending resource bundle chunk
                (reported one-based in the message).
        """
        Exception.__init__(self, 'Error in chunk %s. %s' % (
            index + 1, original_exception))
        self._exc_info = exc_info
        self._original_exception = original_exception
        self._index = index
    @property
    def exc_info(self):
        # sys.exc_info() triple captured at the original failure site.
        return self._exc_info
    @property
    def index(self):
        # Zero-based index of the failing chunk in the resource bundle.
        return self._index
    @property
    def original_exception(self):
        # The wrapped underlying exception.
        return self._original_exception
    def reraise(self):
        """Re-raises an exception preserving original stack trace."""
        # Python 2 three-expression raise: (type, value, traceback).
        raise self._exc_info[0], self, self._exc_info[2]
class Configuration(object):
    """Various options that control content transformation process.

    Each tag/attribute option falls back to the corresponding module-level
    DEFAULT_* value when the caller passes None.
    """
    def __init__(
        self,
        inline_tag_names=None,
        opaque_tag_names=None,
        opaque_decomposable_tag_names=None,
        recomposable_attributes_map=None,
        omit_empty_opaque_decomposable=True,
        sort_attributes=False):
        # Tags kept inline within a translatable chunk, e.g. <b#2>.
        self.inline_tag_names = (
            inline_tag_names if inline_tag_names is not None
            else DEFAULT_INLINE_TAG_NAMES)
        # Tags whose content is never shown to the translator.
        self.opaque_tag_names = (
            opaque_tag_names if opaque_tag_names is not None
            else DEFAULT_OPAQUE_TAG_NAMES)
        # Tags rendered opaque but whose children are translated separately.
        self.opaque_decomposable_tag_names = (
            opaque_decomposable_tag_names
            if opaque_decomposable_tag_names is not None
            else DEFAULT_OPAQUE_DECOMPOSABLE_TAG_NAMES)
        # Attribute name -> set of tag names whose value may be recomposed
        # from the resource bundle.
        self.recomposable_attributes_map = (
            recomposable_attributes_map
            if recomposable_attributes_map is not None
            else DEFAULT_RECOMPOSABLE_ATTRIBUTES_MAP)
        self.omit_empty_opaque_decomposable = omit_empty_opaque_decomposable
        self.sort_attributes = sort_attributes
class Context(object):
    """Runtime state of the transformation process."""
    def __init__(self, tree):
        # Normalized safe_dom tree being transformed.
        self.tree = ContentIO._normalize_tree(tree)
        # Populated later during extraction/merge.
        self.collations = None
        self.index = None
        self.resource_bundle = None
        self.resource_bundle_index_2_collation_index = None
        self.append_to_index = None
        self.is_dirty = False
    def _get_collation_index(self, resource_bundle_index):
        """Maps a resource bundle position to its collation position."""
        mapping = self.resource_bundle_index_2_collation_index
        return mapping[resource_bundle_index]
    def _remove_empty_collations(self):
        """Drops collations that contain no nodes."""
        self.collations = [
            collation for collation in self.collations if collation]
    def _new_collation(self):
        """Starts a fresh collation unless the current one is still empty."""
        assert self.collations is not None
        if not self.collations or self.collations[-1]:
            self.collations.append([])
        self.append_to_index = len(self.collations) - 1
    def _append_collation(self, node):
        """Adds a node to the collation currently being accumulated."""
        if not self.collations:
            self._new_collation()
        self.collations[self.append_to_index].append(node)
class CollationIndex(object):
    """An in-order index of all indexable nodes in the collation."""

    def __init__(self):
        # Maps node -> 1-based position among indexable nodes in its
        # collation, or None for nodes that are not indexable.
        self._node_to_index = {}

    def rebuild(self, context):
        """Recomputes the per-collation index for every collated node."""
        for collation in context.collations:
            next_index = 1
            for node in collation:
                if not TranslationIO._is_indexable(node):
                    self._node_to_index[node] = None
                    continue
                self._node_to_index[node] = next_index
                next_index += 1

    def get_node_index_in_collation(self, node):
        return self._node_to_index[node]

    def find_node_in_collation(self, collation, node_index):
        """Finds node that has a specific index in the collation."""
        matches = (
            node for node in collation
            if self.get_node_index_in_collation(node) == node_index)
        return next(matches, None)

    @classmethod
    def get_all_indexes_in_collation(cls, context, collation):
        """Returns a set of all possible indexes of nodes in the collation."""
        return set(
            context.index.get_node_index_in_collation(node)
            for node in collation
            if TranslationIO._is_indexable(node))
class ContentTransformer(object):
    """Main class that performs content transformation."""

    def __init__(self, config=None):
        # Fall back to an all-defaults configuration when none is given.
        if config is None:
            config = Configuration()
        self.config = config

    def _collate_action_append(self, context, node):
        # Leaf action: add the node to the currently open collation.
        context._append_collation(node)

    def _collate_action_inspect_children(self, context, node):
        # Recurse into children, dispatching each to its chosen action;
        # a None action means the child is dropped from collation.
        for child in ContentIO._get_children(node):
            action = self._get_collate_action(child)
            if action:
                action(context, child)

    def _collate_action_inspect_inline(self, context, node):
        # Inline tags with simple text stay in the surrounding collation;
        # anything richer is treated like a composite block.
        if ContentIO._is_simple_text_content(node):
            self._collate_action_append(context, node)
        else:
            self._collate_action_inspect_composite(context, node)

    def _collate_action_inspect_opaque(self, context, node):
        # Opaque tags are collated whole; their children are not inspected.
        context._append_collation(node)

    def _collate_action_inspect_opaque_decomposable(self, context, node):
        # The node itself joins the current collation while its children
        # are collated separately; the insertion point is restored after.
        context._append_collation(node)
        _append_to_index = context.append_to_index
        context._new_collation()
        self._collate_action_inspect_children(context, node)
        context.append_to_index = _append_to_index

    def _collate_action_inspect_composite(self, context, node):
        # Children go into a fresh collation, bracketed by collation breaks.
        context._new_collation()
        self._collate_action_inspect_children(context, node)
        context._new_collation()

    def _get_collate_action(self, node):
        """Chooses the collation action for a node; None means 'drop'."""
        if isinstance(node, safe_dom.NodeList):
            return self._collate_action_inspect_children
        if isinstance(node, safe_dom.Comment):
            # Only comments carrying the I18N marker prefix are kept.
            if node.get_value().strip().find(I18N_COMMENT_PREFIX) == 0:
                return self._collate_action_append
            else:
                return None
        if isinstance(node, safe_dom.Text):
            return self._collate_action_append
        if isinstance(node, safe_dom.Element):
            # Tag category (inline/opaque/decomposable) decides the action;
            # unknown tags default to composite handling.
            tag_name = node.tag_name
            if tag_name.upper() in self.config.inline_tag_names:
                return self._collate_action_inspect_inline
            if tag_name.upper() in self.config.opaque_tag_names:
                return self._collate_action_inspect_opaque
            if tag_name.upper() in self.config.opaque_decomposable_tag_names:
                return self._collate_action_inspect_opaque_decomposable
            return self._collate_action_inspect_composite
        raise TypeError(
            'Unsupported node type: %s.' % node.__class__.__name__)

    @classmethod
    def _assert_all_indexed_elements_are_consumed(
        cls, context, target_collation, consumed_indexes):
        """Asserts all indexed nodes in the collation were consumed."""
        all_indexes = context.index.get_all_indexes_in_collation(
            context, target_collation)
        if consumed_indexes != all_indexes:
            # Report every indexed tag the translation failed to mention.
            missing_indexes = set(list(all_indexes))
            missing_indexes.difference_update(consumed_indexes)
            missing_tags = []
            for index in missing_indexes:
                missing_node = context.index.find_node_in_collation(
                    target_collation, index)
                missing_tags.append(TranslationIO.get_indexed_tag_name(
                    missing_node, index))
            raise LookupError(
                'Expected to find the following tags: <%s>.' % (
                    '>, <'.join(missing_tags)))

    @classmethod
    def _get_node_index(cls, node, node_list):
        # Position of node within node_list (by equality); asserts presence.
        node_index = None
        index = 0
        for child in node_list:
            if node == child:
                node_index = index
                break
            index += 1
        assert node_index is not None
        return node_index

    def _replace_children(self, tree, collation, children):
        """Replaces all nodes in the collation with the new nodes."""
        first_node = collation[0]
        parent = first_node.parent
        if not parent:
            parent = tree
        first_node_index = self._get_node_index(
            first_node, ContentIO._get_children(parent))
        new_children = []
        old_children = ContentIO._get_children(parent)
        for index in range(0, len(old_children)):
            # Splice the replacement nodes in at the collation's position.
            if index == first_node_index:
                for new_child in children:
                    new_children.append(new_child)
            child = old_children[index]
            ignore = False
            # Drop any old child that belongs to the replaced collation.
            for _child in collation:
                if TranslationIO._is_ancestor(_child, child):
                    ignore = True
                    break
            if not ignore:
                new_children.append(child)
        TranslationIO._set_children(parent, new_children)

    @classmethod
    def _copy_selected_node_attributes(
        cls, config, source_node, target_element):
        """Copy selected attributes from source to target."""
        for key in source_node.attributes.keys():
            # An attribute is eligible when it is registered for this tag
            # (or for all tags via '*') in the recomposable map.
            tag_name_set = config.recomposable_attributes_map.get(
                key.upper())
            eligible = tag_name_set and (
                (source_node.tagName.upper() in tag_name_set) or (
                    '*' in tag_name_set))
            if eligible:
                # NOTE(review): only attributes already present on the
                # target are overwritten; new attributes are never added.
                # Presumably intentional -- confirm.
                if target_element.has_attribute(key):
                    target_element.set_attribute(
                        key, source_node.attributes[key].nodeValue)

    def _recompose(self, context, translation, collation_index):
        """Applies translation to the collation."""
        _tree = TranslationIO.fromstring(translation)
        consumed_indexes = set()
        collation = []
        for node in TranslationIO.tocollation(_tree):
            if node.nodeType == minidom.Node.TEXT_NODE:
                collation.append(safe_dom.Text(node.nodeValue))
                continue
            if node.nodeType == minidom.Node.COMMENT_NODE:
                collation.append(safe_dom.Comment(node.nodeValue))
                continue
            if node.nodeType == minidom.Node.ELEMENT_NODE:
                # Match a translated <tag#index> back to its original node.
                tag_name, index = TranslationIO.parse_indexed_tag_name(node)
                node.tagName = tag_name
                target_node = context.index.find_node_in_collation(
                    context.collations[collation_index], index)
                if not target_node:
                    raise LookupError(
                        'Unexpected tag: <%s#%s>.' % (tag_name, index))
                TranslationIO._copy_node_content_from_minidom_to_safe_dom(
                    node, target_node)
                self._copy_selected_node_attributes(
                    self.config, node, target_node)
                consumed_indexes.add(index)
                collation.append(target_node)
                continue
            raise TypeError('Unknown node type: %s.' % node)
        self._assert_all_indexed_elements_are_consumed(
            context, context.collations[collation_index], consumed_indexes)
        self._replace_children(
            context.tree, context.collations[collation_index], collation)

    def _collate(self, context):
        """Collates XML tree into lists of nodes containing chunks of text."""
        self._collate_action_inspect_children(context, context.tree)
        context._remove_empty_collations()
        context.index.rebuild(context)

    def decompose(self, context):
        """Creates a resource bundle from the collations of nodes."""
        context.collations = []
        context.index = CollationIndex()
        self._collate(context)
        _index = 0
        _collation_index = 0
        context.resource_bundle = []
        context.resource_bundle_index_2_collation_index = {}
        context.append_to_index = None
        for collation in context.collations:
            value = TranslationIO._collation_to_translation(
                self.config, context, collation)
            # Empty translations are skipped, but the collation counter
            # still advances so the bundle->collation map stays correct.
            if value:
                context.resource_bundle.append(value)
                context.resource_bundle_index_2_collation_index[
                    _index] = _collation_index
                _index += 1
            _collation_index += 1

    def recompose(self, context, resource_bundle, errors=None):
        """Pushes string translations from resource bundle into the tree."""
        if context.is_dirty:
            raise AssertionError(
                'Please create new context; this context is not reusable.')
        if context.resource_bundle is None:
            raise Exception('Please call decompose() first.')
        if len(context.resource_bundle) != len(resource_bundle):
            raise IndexError(
                'The lists of translations must have the same number of items '
                '(%s) as extracted from the original content (%s).' % (
                    len(resource_bundle), len(context.resource_bundle)))
        if errors is None:
            errors = []
        context.is_dirty = True
        for index, item in enumerate(resource_bundle):
            try:
                self._recompose(
                    context, item,
                    context._get_collation_index(index))
            except Exception as e:  # pylint: disable=broad-except
                # Collect per-item failures; the last one is re-raised
                # below with its original traceback.
                _error = ResourceBundleItemError(sys.exc_info(), e, index)
                errors.append(_error)
        if errors:
            errors[-1].reraise()
class SourceToTargetMapping(object):
    """Binds one named source value to its translated target value."""

    def __init__(self, name, label, type_name, source_value, target_value):
        (self._name, self._label, self._type, self._source,
         self._target) = (name, label, type_name, source_value, target_value)

    def __str__(self):
        return '%s (%s): %s == %s' % (
            self._name, self._type, self._source, self._target)

    @property
    def name(self):
        return self._name

    @property
    def label(self):
        return self._label

    @property
    def type(self):
        return self._type

    @property
    def source_value(self):
        return self._source

    @property
    def target_value(self):
        return self._target

    @classmethod
    def find_mapping(cls, mappings, name):
        """Returns the first mapping with the given name, or None."""
        return next(
            (mapping for mapping in mappings if mapping.name == name), None)
class SourceToTargetDiffMapping(SourceToTargetMapping):
    """Class that maps source to target with diff."""

    VERB_NEW = 1  # new source value added, no mapping to target exists
    VERB_CHANGED = 2  # source value changed, mapping to target likely invalid
    VERB_CURRENT = 3  # source value is mapped to valid target value

    ALLOWED_VERBS = [VERB_NEW, VERB_CHANGED, VERB_CURRENT]

    # Minimum quick_ratio() similarity for two strings to be treated as a
    # changed version of each other during reorder-aware matching.
    SIMILARITY_CUTOFF = 0.5

    def __init__(
        self, name, label, type_name,
        source_value, target_value, verb,
        source_value_index, target_value_index):
        assert verb in self.ALLOWED_VERBS
        super(SourceToTargetDiffMapping, self).__init__(
            name, label, type_name, source_value, target_value)
        self._verb = verb
        self._source_value_index = source_value_index
        self._target_value_index = target_value_index

    def __str__(self):
        return '%s (%s, %s): %s | %s' % (
            self._name, self._type, self._verb, self._source, self._target)

    @property
    def verb(self):
        # One of VERB_NEW, VERB_CHANGED, VERB_CURRENT.
        return self._verb

    @property
    def source_value_index(self):
        # Position of the source value in the source list, or None.
        return self._source_value_index

    @property
    def target_value_index(self):
        # Position of the matched target value in the target list, or None.
        return self._target_value_index

    @classmethod
    def _create_value_mapping(
        cls, field_value, source_value, target_value, verb,
        source_value_index, target_value_index):
        # field_value may be None (plain list-to-list mapping); then
        # name/label/type are left unset.
        _name = None
        _label = None
        _type = None
        if field_value is not None:
            _name = field_value.name
            _label = field_value.field.label
            _type = field_value.field.type
        return SourceToTargetDiffMapping(
            _name, _label, _type,
            source_value, target_value, verb,
            source_value_index, target_value_index)

    @classmethod
    def map_lists_source_to_target(cls, a, b, allow_reorder=False):
        """Maps items from the source list to a target list."""
        return cls._map_lists_source_to_target_with_reorder(a, b) if (
            allow_reorder) else cls._map_lists_source_to_target_no_reorder(a, b)

    @classmethod
    def _map_lists_source_to_target_no_reorder(cls, a, b):
        """Order-preserving mapping driven by difflib opcodes."""
        mappings = []
        matcher = difflib.SequenceMatcher(None, a, b)
        for optcode in matcher.get_opcodes():
            tag, i1, i2, j1, j2 = optcode
            if 'insert' == tag:
                # Items present only in the target list are not mapped.
                continue
            if 'replace' == tag:
                # Pair up overlapping items as CHANGED; any source surplus
                # becomes NEW; surplus target items are dropped.
                changed_len = min(i2 - i1, j2 - j1)
                for index in range(i1, i1 + changed_len):
                    entry = cls._create_value_mapping(
                        None, a[index], b[j1 + (index - i1)], cls.VERB_CHANGED,
                        index, j1 + (index - i1))
                    mappings.append(entry)
                for index in range(i1 + changed_len, i2):
                    entry = cls._create_value_mapping(
                        None, a[index], None, cls.VERB_NEW, index, None)
                    mappings.append(entry)
                continue
            for index in range(i1, i2):
                entry = None
                if 'equal' == tag:
                    assert (i2 - i1) == (j2 - j1)
                    entry = cls._create_value_mapping(
                        None, a[index], b[j1 + (index - i1)], cls.VERB_CURRENT,
                        index, j1 + (index - i1))
                elif 'delete' == tag:
                    # Source-only items have no target: mark as NEW.
                    entry = cls._create_value_mapping(
                        None, a[index], None, cls.VERB_NEW,
                        index, None)
                else:
                    raise KeyError()
                assert entry is not None
                mappings.append(entry)
        return mappings

    @classmethod
    def _map_lists_source_to_target_with_reorder(cls, a, b):
        """Best-match mapping that tolerates reordering of items."""
        mappings = []
        for new_index, _new in enumerate(a):
            best_match_index = None
            best_score = -1
            entry = None
            for old_index, _old in enumerate(b):
                if _new == _old:
                    # Exact match wins immediately.
                    entry = cls._create_value_mapping(
                        None,
                        a[new_index], b[old_index], cls.VERB_CURRENT,
                        new_index, old_index)
                    break
                score = difflib.SequenceMatcher(None, _new, _old).quick_ratio()
                if score > best_score:
                    best_score = score
                    best_match_index = old_index
            if entry:
                mappings.append(entry)
                continue
            # No exact match: accept a fuzzy match above the cutoff as
            # CHANGED, otherwise treat the item as NEW.
            if best_score > cls.SIMILARITY_CUTOFF:
                entry = cls._create_value_mapping(
                    None, a[new_index], b[best_match_index], cls.VERB_CHANGED,
                    new_index, best_match_index)
            else:
                entry = cls._create_value_mapping(
                    None, a[new_index], None, cls.VERB_NEW,
                    new_index, None)
            assert entry is not None
            mappings.append(entry)
        return mappings

    @classmethod
    def map_source_to_target(
        cls, binding,
        existing_mappings=None, allowed_names=None, allow_list_reorder=False,
        errors=None):
        """Maps binding field value to the existing SourceToTargetMapping.

        Args:
            binding: an instance of ValueToTypeBinding object
            existing_mappings: an array of SourceToTargetMapping holding
                existing translations
            allowed_names: field names that are subject to mapping
            allow_list_reorder: controls whether list items can be reordered
                while looking for better matching
            errors: an array to receive errors found during mapping process

        Returns:
            an array of SourceToTargetDiffMapping objects, one per each field
            value in the binding passed in
        """
        name_to_mapping = {}
        if existing_mappings is not None:
            for mapping in existing_mappings:
                name_to_mapping[mapping.name] = mapping
        mapping = []
        if allow_list_reorder:
            raise NotImplementedError()
        for index, field_value in enumerate(binding.value_list):
            if allowed_names is not None and (
                field_value.name not in allowed_names):
                continue
            target_value = None
            verb = cls.VERB_NEW
            translation = name_to_mapping.get(field_value.name)
            if translation:
                if translation.type != field_value.field.type:
                    _error = AssertionError(
                        'Source and target types don\'t match: %s, %s.' % (
                            field_value.field.type, translation.type))
                    if errors is not None:
                        # NOTE(review): no exception is active here, so
                        # sys.exc_info() is (None, None, None) and a later
                        # reraise() on this error would fail -- confirm.
                        _error = ResourceBundleItemError(
                            sys.exc_info(), _error, index)
                        errors.append(_error)
                        continue
                    else:
                        raise _error
                target_value = translation.target_value
                if translation.source_value != field_value.value:
                    verb = cls.VERB_CHANGED
                else:
                    verb = cls.VERB_CURRENT
            source_value = field_value.value
            entry = cls._create_value_mapping(
                field_value, source_value, target_value, verb, None, None)
            mapping.append(entry)
        return mapping
def extract_resource_bundle_from(
    tree=None, html=None, context=None, config=None):
    """Extracts a resource bundle from HTML given as a string or a tree.

    Args:
        tree: an XML tree of HTML content to use; required if html is None
        html: a string with HTML content to use; required if tree is None
        context: translation context
        config: configuration options

    Returns:
        a (context, transformer) tuple.
    """
    # NOTE(review): the incoming 'context' argument is never read; a fresh
    # Context is always built below. Confirm whether callers rely on that.
    transformer = ContentTransformer(
        config=config if config is not None else Configuration())
    if tree is None and html is not None:
        tree = ContentIO.fromstring(html)
    context = Context(tree)
    transformer.decompose(context)
    return context, transformer
def merge_resource_bundle_into(
    tree=None, html=None, context=None, config=None, resource_bundle=None,
    errors=None):
    """Weaves strings from the resource bundle into the content.

    Args:
        tree: an XML tree of HTML content to use; required if html is None
        html: a string with HTML content to use; required if tree is None
        context: translation context
        config: configuration options
        resource_bundle: a list of strings containing translations in the same
            order and in the same quantity as the list of strings in the
            resource bundle returned by extract_resource_bundle_from()
        errors: a list to receive errors

    Returns:
        a (context, transformer) tuple.
    """
    _context, _transformer = extract_resource_bundle_from(
        tree=tree, html=html, context=context, config=config)
    _transformer.recompose(_context, resource_bundle, errors=errors)
    return _context, _transformer
class ListsDifflibTests(unittest.TestCase):
"""Tests our understanding of difflib as applied to ordered lists."""
def test_diff_two_string_lists_works(self):
newest = ['The', 'sky', 'is', 'blue', '!']
oldest = ['The', 'sky', 'was', 'blue', '!']
matcher = difflib.SequenceMatcher(None, newest, oldest)
expected_verbs = ['equal', 'replace', 'equal']
for index, optcode in enumerate(matcher.get_opcodes()):
tag, _, _, _, _ = optcode
self.assertEqual(expected_verbs[index], tag)
def test_diff_two_string_lists_no_reorder(self):
newest = ['The', 'sky', 'is', 'blue', '!']
oldest = ['The', 'is', 'sky', 'blue', '!']
matcher = difflib.SequenceMatcher(None, newest, oldest)
expected_verbs = ['equal', 'insert', 'equal', 'delete', 'equal']
for index, optcode in enumerate(matcher.get_opcodes()):
tag, _, _, _, _ = optcode
self.assertEqual(expected_verbs[index], tag)
def test_map_lists_source_to_target_identity(self):
newest = ['The', 'sky', 'is', 'blue', '!']
oldest = ['The', 'sky', 'is', 'blue', '!']
mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
newest, oldest)
expected_mappings = [
('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 1),
('is', 'is', SourceToTargetDiffMapping.VERB_CURRENT, 2, 2),
('blue', 'blue', SourceToTargetDiffMapping.VERB_CURRENT, 3, 3),
('!', '!', SourceToTargetDiffMapping.VERB_CURRENT, 4, 4)]
self.assertEqual(
expected_mappings, [(
mapping.source_value, mapping.target_value,
mapping.verb,
mapping.source_value_index, mapping.target_value_index
) for mapping in mappings])
def test_map_lists_source_to_target_no_reorder_but_changed(self):
newest = ['The', 'sky', 'is', 'blue', '!']
oldest = ['The', 'sky', 'was', 'blue', '!']
mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
newest, oldest)
expected_mappings = [
('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 1),
('is', 'was', SourceToTargetDiffMapping.VERB_CHANGED, 2, 2),
('blue', 'blue', SourceToTargetDiffMapping.VERB_CURRENT, 3, 3),
('!', '!', SourceToTargetDiffMapping.VERB_CURRENT, 4, 4)]
self.assertEqual(
expected_mappings, [(
mapping.source_value, mapping.target_value,
mapping.verb,
mapping.source_value_index, mapping.target_value_index
) for mapping in mappings])
def test_map_lists_source_to_target_no_reorder_and_remove_insert(self):
newest = ['The', 'sky', 'is', 'blue', '!']
oldest = ['The', 'is', 'sky', 'blue', '!']
mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
newest, oldest)
expected_mappings = [
('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 2),
('is', None, SourceToTargetDiffMapping.VERB_NEW, 2, None),
('blue', 'blue', SourceToTargetDiffMapping.VERB_CURRENT, 3, 3),
('!', '!', SourceToTargetDiffMapping.VERB_CURRENT, 4, 4)]
self.assertEqual(
expected_mappings, [(
mapping.source_value, mapping.target_value,
mapping.verb,
mapping.source_value_index, mapping.target_value_index
) for mapping in mappings])
def test_map_lists_source_to_target_no_reorder_and_new(self):
newest = ['The', 'sky', 'is', 'blue', '!']
oldest = ['The', 'sky', 'blue', '!']
mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
newest, oldest)
expected_mappings = [
('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 1),
('is', None, SourceToTargetDiffMapping.VERB_NEW, 2, None),
('blue', 'blue', SourceToTargetDiffMapping.VERB_CURRENT, 3, 2),
('!', '!', SourceToTargetDiffMapping.VERB_CURRENT, 4, 3)]
self.assertEqual(
expected_mappings, [(
mapping.source_value, mapping.target_value,
mapping.verb,
mapping.source_value_index, mapping.target_value_index
) for mapping in mappings])
def test_map_lists_source_to_target_no_reorder_change_and_new(self):
newest = ['The', 'sky', 'is', 'blue', '!']
oldest = ['The', 'sky', 'is', 'BLUE']
mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
newest, oldest)
expected_mappings = [
('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 1),
('is', 'is', SourceToTargetDiffMapping.VERB_CURRENT, 2, 2),
('blue', 'BLUE', SourceToTargetDiffMapping.VERB_CHANGED, 3, 3),
('!', None, SourceToTargetDiffMapping.VERB_NEW, 4, None)]
self.assertEqual(
expected_mappings, [(
mapping.source_value, mapping.target_value,
mapping.verb,
mapping.source_value_index, mapping.target_value_index
) for mapping in mappings])
class SetsDifflibUtils(unittest.TestCase):
    """Tests our understanding of difflib as applied to lists and sets."""

    def _actual(self, mappings):
        # Flattens mapping objects into tuples for easy comparison.
        return [
            (mapping.source_value, mapping.target_value, mapping.verb,
             mapping.source_value_index, mapping.target_value_index)
            for mapping in mappings]

    def test_diff_two_string_lists_with_reorder(self):
        mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
            ['The', 'sky', 'is', 'blue', '!'],
            ['The', 'is', 'sky', 'blue', '!'],
            allow_reorder=True)
        self.assertEqual(
            [('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
             ('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 2),
             ('is', 'is', SourceToTargetDiffMapping.VERB_CURRENT, 2, 1),
             ('blue', 'blue', SourceToTargetDiffMapping.VERB_CURRENT, 3, 3),
             ('!', '!', SourceToTargetDiffMapping.VERB_CURRENT, 4, 4)],
            self._actual(mappings))

    def test_diff_two_string_lists_with_reorder_over_cutoff(self):
        # 'blUe' is similar enough to 'blue' to count as CHANGED.
        mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
            ['The', 'sky', 'is', 'blue', '!'],
            ['The', 'sky', 'is', 'blUe', '!'],
            allow_reorder=True)
        self.assertEqual(
            [('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
             ('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 1),
             ('is', 'is', SourceToTargetDiffMapping.VERB_CURRENT, 2, 2),
             ('blue', 'blUe', SourceToTargetDiffMapping.VERB_CHANGED, 3, 3),
             ('!', '!', SourceToTargetDiffMapping.VERB_CURRENT, 4, 4)],
            self._actual(mappings))

    def test_diff_two_string_lists_with_reorder_under_cutoff(self):
        # 'BLUE' falls below the similarity cutoff, so 'blue' is NEW.
        mappings = SourceToTargetDiffMapping.map_lists_source_to_target(
            ['The', 'sky', 'is', 'blue', '!'],
            ['The', 'sky', 'is', 'BLUE', '!'],
            allow_reorder=True)
        self.assertEqual(
            [('The', 'The', SourceToTargetDiffMapping.VERB_CURRENT, 0, 0),
             ('sky', 'sky', SourceToTargetDiffMapping.VERB_CURRENT, 1, 1),
             ('is', 'is', SourceToTargetDiffMapping.VERB_CURRENT, 2, 2),
             ('blue', None, SourceToTargetDiffMapping.VERB_NEW, 3, None),
             ('!', '!', SourceToTargetDiffMapping.VERB_CURRENT, 4, 4)],
            self._actual(mappings))
class TestCasesForIO(unittest.TestCase):
    """Tests for content/translation input/output."""

    def _containers(self):
        # Sample nodes that can hold children.
        return [safe_dom.A('http://'), safe_dom.Element('div')]

    def _leafs(self):
        # Sample nodes that cannot hold children.
        return [
            safe_dom.Comment('comment'),
            safe_dom.Entity('&gt;'),
            safe_dom.Text('text'),
            safe_dom.ScriptElement()]

    def _all(self):
        return [] + self._containers() + self._leafs()

    def test_merge_single_element(self):
        for _elem in self._all():
            _result = safe_dom.NodeList()
            ContentIO._merge_node_lists(_result, _elem)
            self.assertEqual(_result.list, [_elem])

    def test_merge_stack_of_node_lists_leaf_element(self):
        # Nested NodeLists collapse down to the single leaf element.
        for _elem in self._all():
            _list1 = safe_dom.NodeList()
            _list2 = safe_dom.NodeList()
            _list3 = safe_dom.NodeList()
            _list1.append(_list2)
            _list2.append(_list3)
            _list3.append(_elem)
            self.assertEqual(_list1.list, [_list2])
            self.assertEqual(_list2.list, [_list3])
            self.assertEqual(_list3.list, [_elem])
            _result = safe_dom.NodeList()
            ContentIO._merge_node_lists(_result, _list1)
            self.assertEqual(_result.list, [_elem])

    def test_merge_stack_of_node_lists_non_leaf_element(self):
        # Lists nested inside a container element collapse in place.
        for _bar in self._containers():
            for _foo in self._all():
                _bar.empty()
                _list1 = safe_dom.NodeList()
                _list2 = safe_dom.NodeList()
                _list3 = safe_dom.NodeList()
                _bar.add_child(_list1)
                _list1.append(_list2)
                _list2.append(_list3)
                _list3.append(_foo)
                self.assertEqual(_list1.list, [_list2])
                self.assertEqual(_list2.list, [_list3])
                self.assertEqual(_list3.list, [_foo])
                _result = safe_dom.NodeList()
                ContentIO._merge_node_lists(_result, _bar)
                self.assertEqual(_result.list, [_bar])
                self.assertEqual(
                    _bar.children[0].list, [_foo],
                    '%s >>> %s' % (_bar, _foo))

    def test_merge_sibling_node_lists_leaf_element(self):
        # Sibling lists merge in order into a single flat list.
        for _bar in self._all():
            for _foo in self._all():
                _list1 = safe_dom.NodeList()
                _list2 = safe_dom.NodeList()
                _list3 = safe_dom.NodeList()
                _list1.append(_list2)
                _list1.append(_list3)
                _list2.append(_foo)
                _list3.append(_bar)
                self.assertEqual(_list1.list, [_list2, _list3])
                self.assertEqual(_list2.list, [_foo])
                self.assertEqual(_list3.list, [_bar])
                _result = safe_dom.NodeList()
                ContentIO._merge_node_lists(_result, _list1)
                self.assertEqual(_result.list, [_foo, _bar])

    def test_merge_stack_and_sibling_lists(self):
        for _elem in self._containers():
            _list1 = safe_dom.NodeList()
            _list2 = safe_dom.NodeList()
            _list3 = safe_dom.NodeList()
            _list4 = safe_dom.NodeList()
            _list1.append(_list2)
            _list2.append(_elem)
            _elem.add_child(_list3)
            _list3.append(_list4)
            self.assertEqual(_elem.children, [_list3])
            self.assertEqual(_list3.list, [_list4])
            _result = safe_dom.NodeList()
            ContentIO._merge_node_lists(_result, _list1)
            self.assertEqual(_result.list, [_elem])
            # Empty nested lists are removed from the element entirely.
            self.assertEqual(_elem.children, [])

    def test_translation_to_minidom(self):
        # '#' in indexed tag names is encoded as '-' for the XML parser.
        translation = 'The <a#1 href="foo">skies</a#1> are <b#2>blue</b#2>.'
        tree_as_text = 'The <a-1 href="foo">skies</a-1> are <b-2>blue</b-2>.'
        dom = TranslationIO.fromstring(translation)
        self.assertEqual(tree_as_text, TranslationIO.toxml(dom))
        self.assertEqual(translation, TranslationIO.tostring(dom))

    def test_minidom_is_casesensitive(self):
        # Mismatched open/close tag case must fail to parse.
        translation = 'The <SPAN#1>skies</SPAN#1>.'
        TranslationIO.fromstring(translation)
        translation = 'The <span#1>skies</SPAN#1>.'
        with self.assertRaises(Exception):
            TranslationIO.fromstring(translation)
        translation = 'The <SPAN#1>skies</span#1>.'
        with self.assertRaises(Exception):
            TranslationIO.fromstring(translation)

    def test_fromstring_translates_html_entities_for_minidom(self):
        # NOTE(review): entity escapes in these literals were reconstructed
        # from corrupted (display-decoded) text -- confirm against VCS.
        original = u'The skies&reg; are &copy; copyrighted.'
        parsed = u'The skies\xae are \xa9 copyrighted.'
        dom = TranslationIO.fromstring(original)
        self.assertEqual(parsed, TranslationIO.toxml(dom))
        self.assertEqual(parsed, TranslationIO.tostring(dom))

    def test_fromstring_does_not_translate_xml_entities_for_minidom(self):
        original = u'Hello, &quot; &amp; &lt; &gt; &apos; world.'
        dom = TranslationIO.fromstring(original)
        # We leave &apos; as &apos, but minidom turns it to '.
        self.assertEqual(
            u"Hello, &quot; &amp; &lt; &gt; ' world.",
            TranslationIO.toxml(dom))
        self.assertEqual(
            u"Hello, &quot; &amp; &lt; &gt; ' world.",
            TranslationIO.tostring(dom))

    def test_entity_map_converts_all_html_codes_to_base_10_ascii(self):
        for name, code in _ENTITY_MAP.iteritems():
            if name not in _XML_ENTITY_NAMES:
                # Numeric character references look like '&#NNN;'.
                int(code[2:-1], base=10)
                self.assertTrue(code.startswith('&') and code.endswith(';'))
        # Spot check a few values.
        self.assertEqual('&#169;', _ENTITY_MAP.get('copy'))
        self.assertEqual('&#174;', _ENTITY_MAP.get('reg'))

    def test_entity_map_xml_entity_values_are_keynames_with_amp_and_semi(self):
        for xml_entity in _XML_ENTITY_NAMES:
            self.assertEqual('&%s;' % xml_entity, _ENTITY_MAP.get(xml_entity))

    def test_html_to_safedom(self):
        # NOTE(review): leading whitespace inside these literals is part of
        # the fixture and was lost in transit; confirm exact indentation
        # against version control.
        html = '''
            Let's start!
            <p>First!</>
            Some random <b>markup</b> text!
            <p>
            <!-- comment -->
            The <b>skies</b> are <a href="foo">blue</a>.
            The <b>roses</b> are <a href="bar">red</a>!
            <script>alert('Foo!');</script>
            <style>{ width: 100%; }</style>
            </p>
            <p>Last!</p>
            We are done!
        '''
        tree_as_text = '''
            Let's start!
            <p>First!
            Some random <b>markup</b> text!
            </p><p>
            <!-- comment -->
            The <b>skies</b> are <a href="foo">blue</a>.
            The <b>roses</b> are <a href="bar">red</a>!
            <script>alert('Foo!');</script>
            <style>{ width: 100%; }</style>
            </p>
            <p>Last!</p>
            We are done!
        '''
        self.assertEqual(
            tree_as_text,
            ContentIO.tostring(ContentIO.fromstring(html)))

    def test_parse_error_interpretation(self):
        # test expected error message
        error = Exception('not well-formed (invalid token): line 66, column 99')
        line_num, col_num = TranslationIO.extract_line_column_from_parse_error(
            error)
        self.assertEquals(66, line_num)
        self.assertEquals(99, col_num)
        # test text that does not have line & column
        self.assertEquals(
            (None, None),
            TranslationIO.extract_line_column_from_parse_error('Some text.'))
        # test clipping
        text = 'The sky is blue!'
        self.assertEquals(
            '[T]he s',
            TranslationIO.get_text_fragment(text, 1, 0, clip_len=5))
        self.assertEquals(
            'The s[k]y is',
            TranslationIO.get_text_fragment(text, 1, 5, clip_len=5))
        # text out of bounds conditions
        self.assertEquals(
            text,
            TranslationIO.get_text_fragment(text, 1, 16, clip_len=5))
        self.assertEquals(
            text, TranslationIO.get_text_fragment(text, 1, 99))
        self.assertEquals(
            text, TranslationIO.get_text_fragment(text, 1, -1))
        self.assertEquals(text, TranslationIO.get_text_fragment(text, -1, -1))
class TestCasesBase(unittest.TestCase):
    """Base class for testing translations."""

    def setUp(self):
        self.transformer = ContentTransformer()

    def tearDown(self):
        self.tree = None
        self.transformer = None
        self.context = None

    @classmethod
    def _remove_whitespace(cls, content):
        """Collapses whitespace runs and strips spaces adjacent to tags."""
        content = content.replace('\n', ' ').replace('\r', ' ')
        content = re.sub(r'\s+', ' ', content)
        content = re.sub(r'>\s+', '>', content)
        content = re.sub(r'\s+/>', '/>', content)
        content = re.sub(r'\s+<', '<', content)
        return content.strip()

    def _assert_collated_nodes_have_same_parent(self, collation):
        """Checks that every node in a collation shares one parent."""
        parent = None
        for node in collation:
            if parent is None:
                parent = node.parent
            assert parent == node.parent

    def _assert_decomposes(
        self, content, resource_bundle, ignore_whitespace=True):
        """Decomposes content and compares it to the expected bundle."""
        self.context = Context(ContentIO.fromstring(content))
        self.transformer.decompose(self.context)
        for collation in self.context.collations:
            self._assert_collated_nodes_have_same_parent(collation)
        if resource_bundle is not None:
            self.assertEqual(
                len(resource_bundle), len(self.context.resource_bundle))
            for index, _ in enumerate(resource_bundle):
                if ignore_whitespace:
                    self.assertEqual(
                        self._remove_whitespace(resource_bundle[index]),
                        self._remove_whitespace(
                            self.context.resource_bundle[index]))
                else:
                    self.assertEqual(
                        resource_bundle[index],
                        self.context.resource_bundle[index])
        if not self.context.resource_bundle:
            # An empty bundle must come with an empty index map.
            self.assertEqual(
                {},
                self.context.resource_bundle_index_2_collation_index)

    def _assert_recomposes(self, resource_bundle, result):
        """Recomposes the bundle and compares rendered output to result."""
        self.transformer.recompose(self.context, resource_bundle)
        self.assertEqual(
            self._remove_whitespace(result),
            self._remove_whitespace(ContentIO.tostring(self.context.tree)))

    def _assert_recomposes_error(self, resource_bundle):
        """Recomposes expecting failure and returns the captured error.

        Returns:
            the first collected ResourceBundleItemError when available,
            otherwise the raised exception itself.

        Raises:
            Exception: if recompose() unexpectedly succeeds.
        """
        errors = []
        try:
            self.transformer.recompose(
                self.context, resource_bundle, errors=errors)
        except Exception as e:  # pylint: disable=broad-except
            return errors[0] if errors else e
        # Bug fix: the original built this message with a bogus '%' format
        # guarded by an always-None variable, so it raised Exception(None);
        # raise a meaningful message instead.
        raise Exception('Expected recompose() to fail, but it succeeded.')
class TestCasesForContentDecompose(TestCasesBase):
    """Tests for content decomposition phase.

    Decomposition extracts translatable text chunks from HTML into a
    resource bundle; inline markup inside a chunk is rewritten into indexed
    placeholder tags like <a#1>...</a#1>.
    """

    def test_i18n_comment_is_preserved(self):
        # Comments bearing the 'I18N:' marker survive extraction verbatim.
        original = 'Hello <!-- I18N: special comment -->world!'
        expected = ['Hello <!-- I18N: special comment -->world!']
        self._assert_decomposes(original, expected)
        # NOTE(review): unittest ignores this return value; confirm nothing
        # depends on it before removing.
        return original

    def test_i18n_non_comment_is_removed(self):
        # Ordinary (non-I18N) comments are dropped from the extracted text.
        original = 'Hello <!-- just a comment -->world!'
        self._assert_decomposes(original, ['Hello world!'])

    def test_extract_simple_value_no_markup(self):
        original = 'The skies are blue.'
        expected = ['The skies are blue.']
        self._assert_decomposes(original, expected)

    def test_extract_simple_value_with_br(self):
        # Void inline tags are replaced by indexed self-closing placeholders.
        original = 'The skies are <br />blue.'
        expected = ['The skies are <br#1 />blue.']
        self._assert_decomposes(original, expected)

    def test_extract_value_with_inline(self):
        # Inline tag attributes are stripped; the tag keeps only its index.
        html = 'The <a href="foo">sky</a> is blue.'
        expected = ['The <a#1>sky</a#1> is blue.']
        self._assert_decomposes(html, expected)

    def test_extract_value_with_nested_inline(self):
        html = 'The <a href="foo"><b>ocean</b> liner</a> is blue.'
        expected = ['The', '<b#1>ocean</b#1> liner', 'is blue.']
        self._assert_decomposes(html, expected)

    def test_extract_simple_value_with_only_non_ascii_no_markup(self):
        original = u'<p>Трава зеленая.</p>'
        expected = [u'Трава зеленая.']
        self._assert_decomposes(original, expected)

    def test_extract_simple_value_with_only_non_ascii_and_markup(self):
        original = u'Трава <b>зеленая</b>.'
        expected = [u'Трава <b#1>зеленая</b#1>.']
        self._assert_decomposes(original, expected)

    def test_extract_simple_value_with_entity(self):
        original = 'The skies &lt; are blue.'
        expected = ['The skies &lt; are blue.']
        self._assert_decomposes(original, expected)

    def test_extract_simple_value_with_entity_2(self):
        original = '''Let&#39;s start!'''
        expected = ['Let&#39;s start!']
        self._assert_decomposes(original, expected)

    def test_extract_nothing_to_translate(self):
        # Whitespace plus an opaque <script> yields an empty bundle.
        original = '\n\n    <script>alert("Foo!");</script>\n\n    '
        self._assert_decomposes(original, [])

    def test_extract_nothing_to_translate_2(self):
        original = '\n\n    <a href="#foo" />\n\n    '
        self._assert_decomposes(original, [])

    def test_extract_script_value(self):
        # By default <script> is opaque: its body hides behind a placeholder.
        original = 'The skies <script>alert("Foo!");</script> are blue.'
        expected = ['The skies <script#1 /> are blue.']
        self._assert_decomposes(original, expected)
        # With no opaque tags configured, the script text is extracted too.
        config = Configuration(opaque_tag_names=[])
        self.transformer = ContentTransformer(config=config)
        original = 'The skies <script>alert("Foo!");</script> are blue.'
        expected = ['The skies', 'alert("Foo!");', 'are blue.']
        self._assert_decomposes(original, expected)

    def test_extract_script_and_style_value(self):
        original = (
            'The skies <script>alert("Foo!");</script> are '
            '<style> { color: blue; } </style> blue.')
        expected = ['The skies <script#1 /> are <style#2 /> blue.']
        self._assert_decomposes(original, expected)

    def test_extract_one_complex_value(self):
        html = '''begin
            <p>
                The <a href='foo'>skies</a> are <a href="bar">blue</a>.
            </p>
            end'''
        expected = ['begin', 'The <a#1>skies</a#1> are <a#2>blue</a#2>.', 'end']
        self._assert_decomposes(html, expected)
        # Every collation here carries translatable text, so the mapping
        # from bundle index to collation index is the identity.
        self.assertEqual(
            {0: 0, 1: 1, 2: 2},
            self.context.resource_bundle_index_2_collation_index)

    def test_resource_bundle_to_collation_mapping(self):
        html = '''
            <p>
                The <a href='foo'>skies</a> are <a href="bar">blue</a>.
            </p>
            '''
        expected = ['The <a#1>skies</a#1> are <a#2>blue</a#2>.']
        self._assert_decomposes(html, expected)
        # Whitespace-only text before/after <p> still forms collations, but
        # only collation 1 contributes a bundle entry.
        self.assertEqual(3, len(self.context.collations))
        self.assertEqual(
            {0: 1},
            self.context.resource_bundle_index_2_collation_index)

    def test_extract_many_complex_values(self):
        # Tag indexes restart at 1 within each extracted chunk.
        html = '''begin
            <p>
                The <a href="foo">skies</a> are <a href="bar">blue</a>.
            </p>
            followed by more <a href="baz">text</a> with markup
            <p>
                The <span class="red">roses</span> are <a href="y">red</a>.
            </p>
            end'''
        expected = [
            'begin',
            'The <a#1>skies</a#1> are <a#2>blue</a#2>.',
            'followed by more <a#1>text</a#1> with markup',
            'The <span#1>roses</span#1> are <a#2>red</a#2>.',
            'end']
        self._assert_decomposes(html, expected)

    def test_extract_complex_value_with_unicode(self):
        original = u'''
            begin
            <p>
                The <b>skies</b> are <a href="foo">blue</a>.
                <p>Трава <b>зеленая</b>.</p>
                The <b>roses</b> are <a href="bar">red</a>!
            </p>
            end
            '''
        expected = [
            'begin',
            'The <b#1>skies</b#1> are <a#2>blue</a#2>.',
            u'Трава <b#1>зеленая</b#1>.',
            'The <b#1>roses</b#1> are <a#2>red</a#2>!',
            'end'
        ]
        self._assert_decomposes(original, expected)

    def test_extract_ul_value(self):
        # Block-level <ul> becomes a placeholder in the surrounding text,
        # and each <li> contributes its own bundle entries.
        original = '''
            Start!
            <ul>
                The skies are <li>blue</li> and <li>red</li>.
            </ul>
            Done!
            '''
        expected = [
            'Start!\n    <ul#1 />\n    Done!',
            'The skies are',
            'blue',
            'and',
            'red',
            '.']
        self._assert_decomposes(original, expected)

    def test_extract_nested_elements(self):
        original = '''
            <p>
                The skies can be:
                <ul>
                    <li>red</li>
                    <li>blue</li>
                </ul>
                in the fall.
            </p>
            '''
        # TODO(psimakov): undesirable, but the parser closes <p> before new <ul>
        expected = [
            'The skies can be:',
            '<ul#1 />\n    in the fall.',
            'red',
            'blue']
        self._assert_decomposes(original, expected)

    def test_extract_decompose_can_be_called_many_times(self):
        # Each _assert_decomposes() builds a fresh Context, so repeated
        # decomposition of the same content yields the same bundle.
        html = 'The <a href="foo">sky</a> is blue.'
        expected = ['The <a#1>sky</a#1> is blue.']
        self._assert_decomposes(html, expected)
        self._assert_decomposes(html, expected)
        self._assert_decomposes(html, expected)

    def test_extract_decompose_opaque_translatable(self):
        # sort_attributes=True normalizes attribute order in placeholders.
        config = Configuration(
            omit_empty_opaque_decomposable=False,
            sort_attributes=True)
        self.transformer = ContentTransformer(config)
        html = '<img src="foo" />'
        expected = ['<img#1 src="foo" />']
        self._assert_decomposes(html, expected)
        html = '<img src="foo" alt="bar"/>'
        expected = ['<img#1 alt="bar" src="foo" />']
        self._assert_decomposes(html, expected)
        html = '<img alt="bar" src="foo" />'
        expected = ['<img#1 alt="bar" src="foo" />']
        self._assert_decomposes(html, expected)
        html = '<img alt="bar" src="foo" title="baz"/>'
        expected = ['<img#1 alt="bar" src="foo" title="baz" />']
        self._assert_decomposes(html, expected)
        html = '<img src="foo" alt="bar" title="baz"/>'
        expected = ['<img#1 alt="bar" src="foo" title="baz" />']
        self._assert_decomposes(html, expected)

    def test_extract_decompose_custom_tag_with_attribute(self):
        config = Configuration(
            inline_tag_names=['FOO'],
            opaque_decomposable_tag_names=['FOO'],
            omit_empty_opaque_decomposable=False)
        self.transformer = ContentTransformer(config)
        html = '<div><foo alt="bar"></foo></div>'
        expected = ['<foo#1 alt="bar" />']
        self._assert_decomposes(html, expected)
        html = '<div><foo alt="bar">baz</foo></div>'
        expected = ['<foo#1 alt="bar">baz</foo#1>']
        self._assert_decomposes(html, expected)

    def test_extract_large_sample_document(self):
        self.maxDiff = None
        # Round-trip through the parser first so the input is normalized.
        original = ContentIO.tostring(ContentIO.fromstring(
            SAMPLE_HTML_DOC_CONTENT))
        self._assert_decomposes(original, SAMPLE_HTML_DOC_DECOMPOSE)

    def test_extract_resource_bundle_from(self):
        # Exercises the module-level convenience wrapper.
        original = '<p>The <a href="foo">skies</a> are blue!</p>'
        expected = ['The <a#1>skies</a#1> are blue!']
        context, _ = extract_resource_bundle_from(html=original)
        self.assertEqual(expected, context.resource_bundle)
class TestCasesForContentRecompose(TestCasesBase):
    """Tests for content recomposition phase.

    Recomposition merges translated resource-bundle strings back into the
    original HTML tree; original tag attributes are restored from the tree,
    not from the translations.
    """

    def test_recompose_i18n_comment_is_preserved(self):
        html = 'Hello <!-- I18N: special comment -->world!'
        self._assert_decomposes(html, None)
        translations = ['HELLO <!-- I18N: special comment -->WORLD!']
        result = 'HELLO <!-- I18N: special comment -->WORLD!'
        self._assert_recomposes(translations, result)

    def test_recompose_one_complex_value(self):
        html = '''begin
            <p>
                The <a href="foo">skies</a> are <a href="bar">blue</a>.
            </p>
            end'''
        self._assert_decomposes(html, None)
        translations = [
            'BEGIN', 'The <a#1>SKIES</a#1> ARE <a#2>BLUE</a#2>.', 'END']
        # Indexed placeholders map back to the original attributed tags.
        result = '''BEGIN
            <p>
                The <a href="foo">SKIES</a> ARE <a href="bar">BLUE</a>.
            </p>
            END'''
        self._assert_recomposes(translations, result)

    def test_recompose_complex_value_mixed_tags(self):
        html = '''
            Start!
            <p>
                The <b>skies</b> are <a href="foo">blue</a>.
                The <b>roses</b> are <a href="bar">red</a>!
            </p>
            Done!
            '''
        # Both sentences share one <p>, so indexes run 1..4 in one chunk.
        expected = [
            'Start!',
            '''The <b#1>skies</b#1> are <a#2>blue</a#2>.
            The <b#3>roses</b#3> are <a#4>red</a#4>!''',
            'Done!']
        self._assert_decomposes(html, expected)
        translations = [
            'START!',
            '''The <b#1>SKIES</b#1> ARE <a#2>BLUE</a#2>.
            The <b#3>roses</b#3> ARE <a#4>RED</a#4>!''',
            'DONE!']
        result = '''START!<p>The <b>SKIES</b> ARE <a href="foo">BLUE</a>.
            The <b>roses</b> ARE <a href="bar">RED</a>!</p>DONE!'''
        self._assert_recomposes(translations, result)

    def test_recompose_multiple_complex_values_with_mixed_tags(self):
        html = '''
            Start!
            <p>
                The <b>skies</b> are <a href="foo">blue</a>.
            </p>
            <p>
                The <b>roses</b> are <a href="bar">red</a>!
            </p>
            Done!
            '''
        expected = [
            'Start!',
            'The <b#1>skies</b#1> are <a#2>blue</a#2>.',
            'The <b#1>roses</b#1> are <a#2>red</a#2>!',
            'Done!']
        self._assert_decomposes(html, expected)
        translations = [
            'START!',
            'The <b#1>SKIES</b#1> ARE <a#2>blue</a#2>.',
            'THE <b#1>roses</b#1> are <a#2>RED</a#2>!',
            'DONE!']
        result = (
            'START!'
            '<p>The <b>SKIES</b> ARE <a href="foo">blue</a>.</p>'
            '<p>THE <b>roses</b> are <a href="bar">RED</a>!</p>'
            'DONE!')
        self._assert_recomposes(translations, result)

    def test_recompose_complex_value(self):
        html = """
            <h1>
                <a href="/">
                    <img alt="Google"
                    src="//www.google.com/images/logos/google_logo_41.png">
                    Open Online Education</a>
            </h1>
            <a class="maia-teleport" href="#content">Skip to content</a>
            """
        expected = [
            '<img#1 src="//www.google.com/images/logos/google_logo_41.png" '
            'alt="Google" />\n    Open Online Education',
            '<a#1>Skip to content</a#1>']
        self._assert_decomposes(html, expected)
        # Translations may supply new attribute values for indexed tags.
        translations = [
            '<img#1 src="//www.google.com/images/logos/google_logo_99.png" '
            'alt="Google+" />\n    Open ONLINE Education',
            '<a#1>SKIP to content</a#1>']
        result = """
            <h1>
                <a href="/">
                    <img alt="Google+"
                    src="//www.google.com/images/logos/google_logo_99.png" />
                    Open ONLINE Education</a>
            </h1>
            <a class="maia-teleport" href="#content">SKIP to content</a>
            """
        self._assert_recomposes(translations, result)

    def test_recompose_complex_value_2(self):
        html = (
            'The <a class="foo">skies</a> '
            '<p>are <i>not</i></p>'
            ' always <a href="bar">blue</a>.')
        expected = [
            'The <a#1>skies</a#1>',
            'are <i#1>not</i#1>',
            'always <a#1>blue</a#1>.']
        self._assert_decomposes(html, expected)
        translations = [
            'The <a#1>SKIES</a#1> ',
            'ARE <i#1>NOT</i#1>',
            ' ALWAYS <a#1>blue</a#1>.']
        result = (
            'The <a class="foo">SKIES</a> '
            '<p>ARE <i>NOT</i></p>'
            ' ALWAYS <a href="bar">blue</a>.')
        self._assert_recomposes(translations, result)

    def test_textarea_self_closing_fails_parse(self):
        # TODO(psimakov): fix this
        # A self-closing <textarea> is mis-parsed: 'baz' is swallowed into
        # the textarea placeholder instead of staying separate text.
        html = 'foo <textarea name="bar"/> baz'
        expected = ['foo', 'baz']
        with self.assertRaises(AssertionError):
            self._assert_decomposes(html, expected)
        unexpected = ['foo <textarea#1 />', 'baz</div>']
        self._assert_decomposes(html, unexpected)

    def test_placeholder(self):
        config = Configuration(omit_empty_opaque_decomposable=False)
        self.transformer = ContentTransformer(config)
        html = '<textarea class="foo" placeholder="bar">baz</textarea>'
        expected = ['<textarea#1 placeholder="bar" />', 'baz']
        self._assert_decomposes(html, expected)

    def test_recompose_complex_ul(self):
        config = Configuration(omit_empty_opaque_decomposable=False)
        self.transformer = ContentTransformer(config)
        html = '''
            <ul class="foo">
                <li>sss</li>
                <li index="bar">ttt</li>
                <li>xxx</li>
                <li>yyy</li>
                <li>zzz</li>
            </ul>
            '''
        expected = ['<ul#1 />', 'sss', 'ttt', 'xxx', 'yyy', 'zzz']
        self._assert_decomposes(html, expected)
        translations = ['<ul#1 />', 'SSS', 'TTT', 'XXX', 'YYY', 'ZZZ']
        # <li> attributes (class, index) are restored from the original.
        result = '''
            <ul class="foo">
                <li>SSS</li>
                <li index="bar">TTT</li>
                <li>XXX</li>
                <li>YYY</li>
                <li>ZZZ</li>
            </ul>
            '''
        self._assert_recomposes(translations, result)

    def test_recompose_complex_with_opaque_docomposable(self):
        config = Configuration(omit_empty_opaque_decomposable=False)
        self.transformer = ContentTransformer(config)
        html = u"""
            <table border="2">
                <tbody>
                    <tr>
                        <td>
                            <i>table</i>
                            <p></p>
                            <ul>
                                <li>a</li>
                                <li>b</li>
                            </ul>
                            <p></p>
                        </td>
                    </tr>
                </tbody>
            </table>"""
        expected = [
            '<table#1 />', '<i#1>table</i#1>', '<ul#1 />', 'a', 'b']
        self._assert_decomposes(html, expected)
        translations = [
            '<table#1/>', '<i#1>TABLE</i#1>', '<ul#1/>', 'A', 'B']
        result = (
            '<table border="2">'
            '<tbody>'
            '<tr>'
            '<td>'
            '<i>TABLE</i>'
            '<p></p>'
            '<ul>'
            '<li>A</li>'
            '<li>B</li>'
            '</ul>'
            '<p></p>'
            '</td>'
            '</tr>'
            '</tbody>'
            '</table>')
        self._assert_recomposes(translations, result)

    def test_recompose_empty_p_is_roundtripped(self):
        html = 'The skies are blue.<p></p>The roses are red.'
        self._assert_decomposes(html, None)
        translation = ['The SKIES are blue. ', 'The roses are RED.']
        result = 'The SKIES are blue.<p></p>The roses are RED.'
        self._assert_recomposes(translation, result)

    def test_recompose_translation_with_no_significant_markup(self):
        html = 'The skies are blue.<p>Maybe...</p>The roses are red.'
        self._assert_decomposes(html, None)
        translation = ['The SKIES are blue.', 'MAYBE...', 'The roses are RED.']
        result = 'The SKIES are blue.<p>MAYBE...</p>The roses are RED.'
        self._assert_recomposes(translation, result)

    def test_no_new_tag_attributes_can_be_added_in_translations(self):
        html = 'The <a class="foo">skies</a> are blue.'
        self._assert_decomposes(html, None)
        # The onclick attribute from the translation is discarded; only the
        # original attributes survive.
        translation = ['The <a#1 onclick="bar">SKIES</a#1> are blue.']
        result = 'The <a class="foo">SKIES</a> are blue.'
        self._assert_recomposes(translation, result)

    def test_whitespace_is_preserved(self):
        html = 'foo <b><i>bar</i></b>'
        expected_no_whitespace = ['foo', '<i#1>bar</i#1>']
        self._assert_decomposes(html, expected_no_whitespace)
        translation_no_whitespace = ['FOO', '<i#1>BAR</i#1>']
        result_no_whitespace = 'FOO<b><i>BAR</i></b>'
        self._assert_recomposes(translation_no_whitespace, result_no_whitespace)
        # With ignore_whitespace=False the trailing space after 'foo' must
        # appear in the extracted chunk.
        expected_with_whitespace = ['foo ', '<i#1>bar</i#1>']
        self._assert_decomposes(
            html, expected_with_whitespace, ignore_whitespace=False)
        translation_with_whitespace = ['FOO ', '<i#1>BAR</i#1>']
        result_with_whitespace = 'FOO <b><i>BAR</i></b>'
        self._assert_recomposes(
            translation_with_whitespace, result_with_whitespace)

    def test_no_new_tags_can_be_added_in_translations(self):
        original = 'The <a class="foo">skies</a> are blue.'
        self._assert_decomposes(original, None)
        translation = ['The <a#1>SKIES</a#1> are <b#2>blue</b#2>.']
        _error = self._assert_recomposes_error(translation)
        if not isinstance(_error.original_exception, LookupError):
            _error.reraise()
        # NOTE(review): exception.message is Python 2-only.
        self.assertEquals(
            'Unexpected tag: <b#2>.',
            _error.original_exception.message)
        self.assertEqual(0, _error.index)

    def test_all_tags_must_be_indexed_in_translations(self):
        original = 'The <a class="foo">skies</a> are blue.'
        self._assert_decomposes(original, None)
        translation = ['The <a#1>SKIES</a#1> are <b>blue</b>.']
        _error = self._assert_recomposes_error(translation)
        if not isinstance(_error.original_exception, SyntaxError):
            _error.reraise()
        self.assertEquals(
            'Error extracting index form the tag <b>. '
            'Tag name format is <tag_name#index>, like <a#1>.',
            _error.original_exception.message)
        self.assertEqual(0, _error.index)

    def test_all_tags_must_be_translated_in_translations(self):
        original = 'The <a class="foo">skies</a> are <a href="bar">blue</a>.'
        expected = ['The <a#1>skies</a#1> are <a#2>blue</a#2>.']
        self._assert_decomposes(original, expected)
        translation = ['The SKIES are blue.']
        _error = self._assert_recomposes_error(translation)
        if not isinstance(_error.original_exception, LookupError):
            _error.reraise()
        self.assertEquals(
            'Expected to find the following tags: <a#1>, <a#2>.',
            _error.original_exception.message)
        self.assertEqual(0, _error.index)

    def test_can_recompose_alphanum_tag_names(self):
        # Custom tags with digits/dashes in the name work as inline tags.
        config = Configuration(
            inline_tag_names=['GCB-HTML5VIDEO'],
            omit_empty_opaque_decomposable=False)
        self.transformer = ContentTransformer(config)
        html = 'video <gcb-html5video url="woo.mp4"></gcb-html5video>'
        expected = ['video <gcb-html5video#1 />']
        self._assert_decomposes(html, expected)
        translation = ['VIDEO <gcb-html5video#1 />']
        result = 'VIDEO <gcb-html5video url="woo.mp4"></gcb-html5video>'
        self._assert_recomposes(translation, result)

    def test_recompose_called_multiple_times_fails(self):
        # A Context is single-use: a second recompose() must assert.
        html = 'The <a class="foo">skies</a> are blue.'
        self._assert_decomposes(html, None)
        translation = ['The <a#1 onclick="bar">SKIES</a#1> are blue.']
        result = 'The <a class="foo">SKIES</a> are blue.'
        self._assert_recomposes(translation, result)
        _error = self._assert_recomposes_error(translation)
        if not isinstance(_error, AssertionError):
            raise Exception()
        self.assertEquals(
            'Please create new context; this context is not reusable.',
            _error.message)

    def test_recompose_large_sample_document(self):
        self.maxDiff = None
        original = ContentIO.tostring(ContentIO.fromstring(
            SAMPLE_HTML_DOC_CONTENT))
        self._assert_decomposes(original, None)
        # Copy the bundle before mutating so the module constant is intact.
        translations = [] + SAMPLE_HTML_DOC_DECOMPOSE
        translations[2] = '<a#1>SKIP TO CONTENT</a#1>'
        result = original.replace('Skip to content', 'SKIP TO CONTENT')
        self._assert_recomposes(translations, result)

    def test_recompose_resource_bundle_into(self):
        # Exercises the module-level convenience wrapper.
        original = '<p>The <a href="foo">skies</a> are blue!</p>'
        translation = [u'<a#1>Небо</a#1> синее!']
        expected = u'<p><a href="foo">Небо</a> синее!</p>'
        context, _ = merge_resource_bundle_into(
            html=original, resource_bundle=translation)
        self.assertEqual(expected, ContentIO.tostring(context.tree))
def run_all_unit_tests():
    """Runs all unit tests in this module."""
    loader = unittest.TestLoader()
    test_classes = [
        ListsDifflibTests, SetsDifflibUtils,
        TestCasesForIO,
        TestCasesForContentDecompose, TestCasesForContentRecompose]
    combined = unittest.TestSuite(
        [loader.loadTestsFromTestCase(cls) for cls in test_classes])
    result = unittest.TextTestRunner().run(combined)
    if not result.wasSuccessful() or result.errors:
        raise Exception(result)
# Below we keep content needed for test cases. We keep it here to allow this
# module to be reused in any application; splitting the tests out into
# /tests/... would make this more difficult.
# pylint: disable=line-too-long
# taken from http://www.google.com/edu/openonline/edukit/course-parts.html
SAMPLE_HTML_DOC_CONTENT = u'''
<!DOCTYPE html>
<html class="google" lang="en">
<head>
<script>
(function(H){H.className=H.className.replace(/\bgoogle\b/,'google-js')})(document.documentElement)
</script>
<meta charset="utf-8">
<meta content="initial-scale=1, minimum-scale=1, width=device-width" name="viewport">
<title>
Google Open Online Education
</title>
<script src="//www.google.com/js/google.js">
</script>
<script>
new gweb.analytics.AutoTrack({profile:"UA-12481063-1"});
</script>
<link href="//fonts.googleapis.com/css?family=Open+Sans:300,400,600,700&lang=en" rel=
"stylesheet">
<link href=" /edu/openonline/css/edukit.css" rel="stylesheet">
</head>
<body>
<div class="maia-header" id="maia-header" role="banner">
<div class="maia-aux">
<h1>
<a href="/"><img alt="Google" src="//www.google.com/images/logos/google_logo_41.png">
Open Online Education</a>
</h1><a class="maia-teleport" href="#content">Skip to content</a>
</div>
</div>
<div class="maia-nav" id="maia-nav-x" role="navigation">
<div class="maia-aux">
<ul>
<li>
<a data-g-action="Maia: Level 1" data-g-event="Maia: Site Nav" data-g-label="OOE_Home"
href="/edu/openonline/index.html">Home</a>
</li>
<li>
<a data-g-action="Maia: Level 1" data-g-event="Maia: Site Nav" data-g-label=
"OOE_Insights" href="/edu/openonline/insights/index.html">Insights</a>
</li>
<li>
<a class="active" data-g-action="Maia: Level 1" data-g-event="Maia: Site Nav"
data-g-label="OOE_Edu_Kit" href="/edu/openonline/edukit/index.html">Online Course
Kit</a>
</li>
<li>
<a data-g-action="Maia: Level 1" data-g-event="Maia: Site Nav" data-g-label=
"OOE_Open_edX" href="/edu/openonline/tech/index.html">Technologies</a>
</li>
<li>
<a class="active" data-g-action="Maia: Level 1" data-g-event="Maia: Site Nav"
data-g-label="GOOG_EDU_main" href="/edu/index.html">Google for Education</a>
</li>
</ul>
</div>
</div>
<div id="maia-main" role="main">
<div class="maia-nav-aux">
<div class="edukit_nav">
<ul>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label=
"Quick Start" href="/edu/openonline/edukit/quickstart.html">Quick Start</a>
</li>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label="Plan"
href="/edu/openonline/edukit/plan.html">Plan</a>
</li>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label="Create"
href="/edu/openonline/edukit/create.html">Create</a>
</li>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label=
"Implement" href="/edu/openonline/edukit/implement.html">Implement</a>
</li>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label="Pilot"
href="/edu/openonline/edukit/pilot.html">Pilot</a>
</li>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label=
"Communicate" href="/edu/openonline/edukit/communicate.html">Communicate</a>
</li>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label=
"Using Course Builder" href="/edu/openonline/edukit/course-parts.html">Using Course
Builder</a>
</li>
<li>
<a data-g-action="Maia: Level 2" data-g-event="Maia: Site Nav" data-g-label=
"More Resources" href="/edu/openonline/edukit/resource.html">More Resources</a>
</li>
</ul>
</div>
</div>
<div class="maia-teleport" id="content"></div>
<div class="clearfix_nav"></div>
<div class="ooe_content">
<h1>
Parts of a Course Builder Course
</h1>
<p>
The primary parts of a course created with Course Builder are as follows:
</p>
<ul>
<li>
<a href="#course_content_and_delivery">Course content and delivery</a><br>
The material that you formally convey to students. Formal content can be lessons
recorded or written in advance. It can also be live question and answer sessions with
course staff.
</li>
<li>
<a href="#assessments_and_activities">Assessments and activities</a><br>
Graded assessments with a fixed deadline to track student progress. You can also use
ungraded assessments, called <strong>activities</strong>, to provide feedback and hints
to students.
</li>
<li>
<a href="#social_interactions">Social interactions</a><br>
An important component of an online course is the interactions among the students
themselves and the interactions between students and the course staff (the instructors
or teaching assistants).
</li>
<li>
<a href="#administrative_tasks">Administrative tasks</a><br>
Of course, there are tasks such as registering students, setting up the course,
tracking usage, and so on.
</li>
</ul>
<p>
A single course consists of a series of units with individual lessons and activities. The
course can have any number of graded assessments scattered before, between, and after the
units and lessons. It can also have one or more formally-set up avenues for social
interaction for the students.
</p>
<p>
For a quick description of the flow a student typically experiences in a course, see
<a href="courseflow.html">Course Flow for Students</a>. For a description of the design
process we think is effective, see <a href="design-process.html">Design Process</a>.
</p>
<p>
The rest of this page discusses the four main parts of a course in more detail.
</p><a id="course_content_and_delivery" name="course_content_and_delivery"></a>
<h2>
Course content and delivery
</h2>
<p>
To make the content more digestible, consider grouping course material into a number of
units. Each unit contains a series of lessons and possibly activities related to a
particular topic within the content covered by the entire course.
</p><input class="toggle-box-small" id="units1" type="checkbox"> <label for="units1">Units
with lessons and activities</label>
<div class="toggle-small maia-aside">
<p>
In the <a href="power-searching.html">Power Searching with Google</a> course, one unit
is about interpreting search results; another is about checking the reliability of the
content of those search results. Each of those units consists of about five lessons and
about five activities. For these units, course staff creates and releases the lessons
and activities ahead of time. While the material is available to students, course staff
interacts with students through the <a href="forums.html">participant community
mechanisms</a>.
</p>
<p>
For a unit that consists of a series of lessons and activities, we found that around
five lessons and four activities is a good length.
</p>
<p>
A lesson is a coherent and relatively small chunk of information. In Power Searching
with Google, we chose to create each lesson as one video and a text version of the same
content. Your lessons do not have to have both parts. For more information, see
<a href="//code.google.com/p/course-builder/wiki/CreateLessons">Create Lessons</a>.
</p>
<p>
An activity is an ungraded assessment, used to provide feedback to students on how well
they understand the lesson. Activities typically contain optional hints. For more
information, see <a href="#assessments_and_activities">Assessments and activities</a>.
</p>
<p>
Tips:
</p>
<ul>
<li>Make short videos, preferably 3-5 minutes.
</li>
<li>Include closed captions in your videos.
</li>
<li>For the text version, take the time to clean up the transcript.
</li>
<li>When deciding what content to include, design for the average student. To
accommodate other students, consider including background or advanced material in forum
posts (if you want discussion and maybe answers) or in Google+ or blog posts (if you
just want to broadcast the information).
</li>
</ul>
</div><input class="toggle-box-small" id="units2" type="checkbox"> <label for=
"units2">Units using Hangouts on Air</label>
<div class="toggle-small maia-aside">
<p>
A very different type of unit is online office hours where the students submit
questions ahead of time and the course staff answers those questions in real-time using
a <a href="http://www.google.com/+/learnmore/hangouts/onair.html">Hangout On Air</a>.
Depending on your course, you may have some students interacting with the course staff
over video for the Hangout On Air or you may have students submit all of their
questions using <a href="https://www.google.com/moderator/">Google Moderator</a>.
</p>
<p>
For online office hours you have a fixed date and time when the course staff broadcasts
a session for students to watch and interact with.
</p>
<p>
If you have a very small course (fewer than 10 people), you can use a Google Hangout
for your session. If you have more than 10 people, you can use a combination of Google
Hangouts on Air and Google Moderator instead.
</p>
<p>
A <a href="//www.google.com/+/learnmore/hangouts/">Google Hangout</a> is a video chat
that can have up to 10 participants. In a Google Hangout, all participants can share
what\u2019s on each person's screen, collaborate in Google Docs, view presentations and
diagrams together and speak to each other. If your course is small enough, this is a
great way to go. If your course is large, you may still consider having your students
break into small groups for interactive activities with each other over Hangouts.
</p>
<p>
If your course has many more than 10 students, you can use a combination of Hangouts on
Air and Google Moderator to create a live experience with your students. With a
<a href="//code.google.com/p/course-builder/wiki/OnlineOfficeHours#Setting_up_a_Hangout_On_Air">
Google Hangout on Air</a>, you can post a live session with your instructors and any
guests you chose. You post the Hangout on Air to your YouTube channel and to your
Google+ stream. Students cannot talk to you in real-time, but they can ask you
questions through the use of a Google Moderator series, or by posting comments to the
Moderator stream, YouTube stream, or Google+ stream. You can use a <a href=
"https://code.google.com/p/course-builder/wiki/OnlineOfficeHours#Setting_up_a_Google_Moderator_series">
Google Moderator series</a> to collect questions from your students and have them vote
those questions up and down; the collecting can either be done in advance, during the
Hangout on Air, or both.
</p>
<p>
<strong>Tip:</strong> If you do a Hangout on Air, consider using a live captioning
service to help students who are hearing impaired or whose primary language is not the
language used in the Hangout on Air.
</p>
</div>
<p>
For all of these unit types, instructors make course content available to students at
scheduled intervals throughout the course. Once available, the content continues to be
available until the course ends. That is, lessons are not available for only a few days;
students can go back and redo lessons at any time throughout the course. In <a href=
"http://www.powersearchingwithgoogle.com/">Power Searching with Google</a>, soon after
online office hours took place, the course staff posted a video of it. So even if
students missed the office hours, that material was still available.
</p>
<p>
Releasing course content at scheduled intervals has one perhaps unanticipated benefit.
Many students tend to work on content relatively soon after the content becomes
available. For that reason, questions about course material tend to cluster near the
release of that material. Because other students are thinking about the same material,
they are more likely to be interested in getting involved in discussions about the
material or in answering questions.
</p>
<p>
These are only some possibilities for how to model units. You may discover other ways to
do things that suit your material better. For example, instead of all of the teaching
being pushed from the course staff, you may decide to break your students into small
cohorts and have those cohorts work on material together. You could provide them with
lessons and activities to start from and then have them use Hangouts of their own for
group study.
</p><a id="assessments_and_activities" name="assessments_and_activities"></a>
<h2>
Assessments and activities
</h2>
<p>
In Course Builder, an assessment is a test. Assessments can either be graded or ungraded.
Ungraded assessments are also called activities.
</p>
<p>
When you create your course using Course Builder, you supply the code with the
information needed to grade assessments.
</p><input class="toggle-box-small" id="question-types" type="checkbox"> <label for=
"question-types">Question types</label>
<div class="toggle-small maia-aside">
<p>
Graded and ungraded assessments essentially support the same types of questions:
</p>
<ul>
<li>Multiple-choice with one correct answer
</li>
<li>Multiple-choice with more than one correct answer
</li>
<li>Fill-in-the blank
</li>
<li>Go and do something. These are questions that do not have prepared answers and
instead invite the user to engage in some action. For example, in <a href=
"//www.powersearchingwithgoogle.com/">Power Searching with Google</a> one of the
questions was "When was the last historic earthquake in your area? Share your answer in
the forum."
</li>
</ul>
<p>
Telling the experimental code how to grade multiple-choice questions is
straightforward. Telling it how to grade fill-in-the-blank questions can be trickier.
You need to be very careful both in your wording of the question and in what you
include about the correct answer. \u201cGo and do something\u201d questions do not require an
answer, so you don\u2019t have to include anything about the answer.
</p>
</div><input class="toggle-box-small" id="ungraded-activities" type="checkbox"> <label for=
"ungraded-activities">Ungraded activities</label>
<div class="toggle-small maia-aside">
<p>
An activity typically covers material only from the lesson that the activity
immediately follows. You use them to let the students assess their own understanding of
the material in that lesson. An activity does not affect a student\u2019s final score in the
course.
</p>
<p>
When you create a question for an activity, you can provide the following information:
</p>
<ul>
<li>The correct answer to the question, so the code knows what to tell the student.
</li>
<li>A hint about why incorrect answers are incorrect. The hint should point the student
to the correct answer.
</li>
<li>The correct answer and explanatory information.
</li>
</ul>
</div><input class="toggle-box-small" id="graded-assessments" type="checkbox"> <label for=
"graded-assessments">Graded assessments</label>
<div class="toggle-small maia-aside">
<p>
Graded assessments typically cover material from several units and lessons. You use
them to rate students\u2019 performance. Before and after assessments can also help you
gauge the effectiveness of the course.
</p>
<p>
With Course Builder's experimental code, you have control over how many graded
assessments you provide and how each of those assessments counts in the final scoring
for a student\u2019s grade.
</p>
<p>
Because you use a graded assessment to rate performance and measure success, your
practical choices are:
</p>
<ul>
<li>Only let students take a graded assessment once. In this case, you can tell your
students which of their answers are incorrect.
</li>
<li>Let students take a graded assessment multiple times. In this case, do not tell
them which answers are incorrect. (If you do, then they'll have no difficulty getting
100% when retaking the same assessment.)
</li>
</ul>
<p>
If you choose to allow your students to take the same graded assessment multiple times,
consider still giving the students some feedback about what they did wrong. To do this,
map each assessment question to the corresponding unit and lesson within the course.
Then immediately after submission of the assessment, show students the score and list
the lessons to review to improve their score.
</p>
</div>
<h2>
Social interactions
</h2>
<p>
Another critical component of a successful course is student participation. Online office
hours and asking questions of the experts are some examples to elicit participation.
</p>
<p>
For large online courses, the size of the audience means that it is impractical for the
course staff to answer all of the questions and to enter all of the discussions posed by
all of the students. Instead, you can set up avenues in which the students can
participate not just with the instructor but also with other students.
</p>
<p>
The most common types of social interactions are:
</p>
<ul>
<li>
<a href="//code.google.com/p/course-builder/wiki/WebForums">Google Groups or other web
forum</a><br>
A web forum is a great way to get your students to talk to each other. To facilitate
discussion, you can set up your forum with appropriate categories, to guide students to
likely places to read and to post questions on particular topics within your course.
When designing the content of your course, consider creating activities requesting that
students post answers to the forum. You can also use a forum to post material that you
do not want in the main body of your course, either because it is background material
for students who need a bit more help or more challenging questions for more advanced
students.
</li>
<li>
<a href="//code.google.com/p/course-builder/wiki/Announcements">Google+ page or
blog</a><br>
Use Google+ or your blog to share information that you want available to not just your
students, but to other people as well. While students can comment on your posts, these
formats are still primarily methods for instructors to push information out to the
students.
</li>
<li>
<a href="//www.google.com/+/learnmore/hangouts/">Google Hangout</a><br>
You may decide that you want your students to divide into smaller groups to work on
projects together. Your students probably live in distributed areas. You can have them
meet in a Google Hangout to collaborate on their project.
</li>
<li>
<a href=
"https://code.google.com/p/course-builder/wiki/CreateEmailList">Announcements-only
email alias</a><br>
Throughout the course, you may want to send email to students, such as to remind them
of upcoming events.
</li>
</ul>
<p>
In addition to these things that you set up, students may create additional interaction
mechanisms, perhaps an email alias for students interested in a particular aspect of the
course material or weekly in-person meetings for students living close to each other.
</p><a id="administrivia" name="administrivia"></a>
<h2>
Administrative tasks
</h2>
<p>
Of course, as with any class there are various administrative aspects to creating an
online course. Two of the major ones are <a href=
"//code.google.com/p/course-builder/wiki/CreateRegistration">managing student
registration</a> and <a href=
"//code.google.com/p/course-builder/wiki/MeasureEfficacy">collecting and analyzing data
to see how well your course does</a>.
</p>
<p>
For a full list of tasks needed to create a course, see the <a href=
"//code.google.com/p/course-builder/wiki/CourseBuilderChecklist">Course Builder
Checklist</a>.
</p>
</div>
</div>
<div id="maia-signature"></div>
<div class="maia-footer" id="maia-footer">
<div id="maia-footer-global">
<div class="maia-aux">
<ul>
<li>
<a href="/">Google</a>
</li>
<li>
<a href="/intl/en/about/">About Google</a>
</li>
<li>
<a href="/intl/en/policies/">Privacy & Terms</a>
</li>
</ul>
</div>
</div>
</div><script src="//www.google.com/js/maia.js">
</script>
</body>
</html>
'''
# Expected plain-text decomposition of the SAMPLE_HTML_DOC document defined
# above. Each entry is the extracted text content of one logical block of the
# page, in document order, with inline markup rewritten as indexed tags
# (e.g. <a#1>...</a#1>) and the document's original whitespace preserved
# inside the strings. Entries containing non-ASCII characters (curly quotes
# and apostrophes) are unicode literals.
SAMPLE_HTML_DOC_DECOMPOSE = [
    'Google Open Online Education',
    '<img#1 src="//www.google.com/images/logos/google_logo_41.png" alt="Google" />\n      Open Online Education',
    '<a#1>Skip to content</a#1>',
    '<a#1>Home</a#1>',
    '<a#1>Insights</a#1>',
    '<a#1>Online Course\n            Kit</a#1>',
    '<a#1>Technologies</a#1>',
    '<a#1>Google for Education</a#1>',
    '<a#1>Quick Start</a#1>',
    '<a#1>Plan</a#1>',
    '<a#1>Create</a#1>',
    '<a#1>Implement</a#1>',
    '<a#1>Pilot</a#1>',
    '<a#1>Communicate</a#1>',
    '<a#1>Using Course\n            Builder</a#1>',
    '<a#1>More Resources</a#1>',
    'Parts of a Course Builder Course',
    'The primary parts of a course created with Course Builder are as follows:',
    '<a#1>Course content and delivery</a#1><br#2 />\n          The material that you formally convey to students. Formal content can be lessons\n          recorded or written in advance. It can also be live question and answer sessions with\n          course staff.',
    '<a#1>Assessments and activities</a#1><br#2 />\n          Graded assessments with a fixed deadline to track student progress. You can also use\n          ungraded assessments, called <strong#3>activities</strong#3>, to provide feedback and hints\n          to students.',
    '<a#1>Social interactions</a#1><br#2 />\n          An important component of an online course is the interactions among the students\n          themselves and the interactions between students and the course staff (the instructors\n          or teaching assistants).',
    '<a#1>Administrative tasks</a#1><br#2 />\n          Of course, there are tasks such as registering students, setting up the course,\n          tracking usage, and so on.',
    'A single course consists of a series of units with individual lessons and activities. The\n        course can have any number of graded assessments scattered before, between, and after the\n        units and lessons. It can also have one or more formally-set up avenues for social\n        interaction for the students.',
    'For a quick description of the flow a student typically experiences in a course, see\n        <a#1>Course Flow for Students</a#1>. For a description of the design\n        process we think is effective, see <a#2>Design Process</a#2>.',
    'The rest of this page discusses the four main parts of a course in more detail.',
    'Course content and delivery',
    'To make the content more digestible, consider grouping course material into a number of\n        units. Each unit contains a series of lessons and possibly activities related to a\n        particular topic within the content covered by the entire course.',
    'Units\n        with lessons and activities',
    'In the <a#1>Power Searching with Google</a#1> course, one unit\n          is about interpreting search results; another is about checking the reliability of the\n          content of those search results. Each of those units consists of about five lessons and\n          about five activities. For these units, course staff creates and releases the lessons\n          and activities ahead of time. While the material is available to students, course staff\n          interacts with students through the <a#2>participant community\n          mechanisms</a#2>.',
    'For a unit that consists of a series of lessons and activities, we found that around\n          five lessons and four activities is a good length.',
    'A lesson is a coherent and relatively small chunk of information. In Power Searching\n          with Google, we chose to create each lesson as one video and a text version of the same\n          content. Your lessons do not have to have both parts. For more information, see\n          <a#1>Create Lessons</a#1>.',
    'An activity is an ungraded assessment, used to provide feedback to students on how well\n          they understand the lesson. Activities typically contain optional hints. For more\n          information, see <a#1>Assessments and activities</a#1>.',
    'Tips:',
    'Make short videos, preferably 3-5 minutes.',
    'Include closed captions in your videos.',
    'For the text version, take the time to clean up the transcript.',
    'When deciding what content to include, design for the average student. To\n            accommodate other students, consider including background or advanced material in forum\n            posts (if you want discussion and maybe answers) or in Google+ or blog posts (if you\n            just want to broadcast the information).',
    'Units using Hangouts on Air',
    'A very different type of unit is online office hours where the students submit\n          questions ahead of time and the course staff answers those questions in real-time using\n          a <a#1>Hangout On Air</a#1>.\n          Depending on your course, you may have some students interacting with the course staff\n          over video for the Hangout On Air or you may have students submit all of their\n          questions using <a#2>Google Moderator</a#2>.',
    'For online office hours you have a fixed date and time when the course staff broadcasts\n          a session for students to watch and interact with.',
    'If you have a very small course (fewer than 10 people), you can use a Google Hangout\n          for your session. If you have more than 10 people, you can use a combination of Google\n          Hangouts on Air and Google Moderator instead.',
    u'A <a#1>Google Hangout</a#1> is a video chat\n          that can have up to 10 participants. In a Google Hangout, all participants can share\n          what\u2019s on each person's screen, collaborate in Google Docs, view presentations and\n          diagrams together and speak to each other. If your course is small enough, this is a\n          great way to go. If your course is large, you may still consider having your students\n          break into small groups for interactive activities with each other over Hangouts.',
    'If your course has many more than 10 students, you can use a combination of Hangouts on\n          Air and Google Moderator to create a live experience with your students. With a\n          <a#1>\n          Google Hangout on Air</a#1>, you can post a live session with your instructors and any\n          guests you chose. You post the Hangout on Air to your YouTube channel and to your\n          Google+ stream. Students cannot talk to you in real-time, but they can ask you\n          questions through the use of a Google Moderator series, or by posting comments to the\n          Moderator stream, YouTube stream, or Google+ stream. You can use a <a#2>\n          Google Moderator series</a#2> to collect questions from your students and have them vote\n          those questions up and down; the collecting can either be done in advance, during the\n          Hangout on Air, or both.',
    '<strong#1>Tip:</strong#1> If you do a Hangout on Air, consider using a live captioning\n          service to help students who are hearing impaired or whose primary language is not the\n          language used in the Hangout on Air.',
    'For all of these unit types, instructors make course content available to students at\n        scheduled intervals throughout the course. Once available, the content continues to be\n        available until the course ends. That is, lessons are not available for only a few days;\n        students can go back and redo lessons at any time throughout the course. In <a#1>Power Searching with Google</a#1>, soon after\n        online office hours took place, the course staff posted a video of it. So even if\n        students missed the office hours, that material was still available.',
    'Releasing course content at scheduled intervals has one perhaps unanticipated benefit.\n        Many students tend to work on content relatively soon after the content becomes\n        available. For that reason, questions about course material tend to cluster near the\n        release of that material. Because other students are thinking about the same material,\n        they are more likely to be interested in getting involved in discussions about the\n        material or in answering questions.',
    'These are only some possibilities for how to model units. You may discover other ways to\n        do things that suit your material better. For example, instead of all of the teaching\n        being pushed from the course staff, you may decide to break your students into small\n        cohorts and have those cohorts work on material together. You could provide them with\n        lessons and activities to start from and then have them use Hangouts of their own for\n        group study.',
    'Assessments and activities',
    'In Course Builder, an assessment is a test. Assessments can either be graded or ungraded.\n        Ungraded assessments are also called activities.',
    'When you create your course using Course Builder, you supply the code with the\n        information needed to grade assessments.',
    'Question types',
    'Graded and ungraded assessments essentially support the same types of questions:',
    'Multiple-choice with one correct answer',
    'Multiple-choice with more than one correct answer',
    'Fill-in-the blank',
    'Go and do something. These are questions that do not have prepared answers and\n            instead invite the user to engage in some action. For example, in <a#1>Power Searching with Google</a#1> one of the\n            questions was "When was the last historic earthquake in your area? Share your answer in\n            the forum."',
    u'Telling the experimental code how to grade multiple-choice questions is\n          straightforward. Telling it how to grade fill-in-the-blank questions can be trickier.\n          You need to be very careful both in your wording of the question and in what you\n          include about the correct answer. \u201cGo and do something\u201d questions do not require an\n          answer, so you don\u2019t have to include anything about the answer.',
    'Ungraded activities',
    u'An activity typically covers material only from the lesson that the activity\n          immediately follows. You use them to let the students assess their own understanding of\n          the material in that lesson. An activity does not affect a student\u2019s final score in the\n          course.',
    'When you create a question for an activity, you can provide the following information:',
    'The correct answer to the question, so the code knows what to tell the student.',
    'A hint about why incorrect answers are incorrect. The hint should point the student\n            to the correct answer.',
    'The correct answer and explanatory information.',
    'Graded assessments',
    u'Graded assessments typically cover material from several units and lessons. You use\n          them to rate students\u2019 performance. Before and after assessments can also help you\n          gauge the effectiveness of the course.',
    u'With Course Builder's experimental code, you have control over how many graded\n          assessments you provide and how each of those assessments counts in the final scoring\n          for a student\u2019s grade.',
    'Because you use a graded assessment to rate performance and measure success, your\n          practical choices are:',
    'Only let students take a graded assessment once. In this case, you can tell your\n            students which of their answers are incorrect.',
    'Let students take a graded assessment multiple times. In this case, do not tell\n            them which answers are incorrect. (If you do, then they'll have no difficulty getting\n            100% when retaking the same assessment.)',
    'If you choose to allow your students to take the same graded assessment multiple times,\n          consider still giving the students some feedback about what they did wrong. To do this,\n          map each assessment question to the corresponding unit and lesson within the course.\n          Then immediately after submission of the assessment, show students the score and list\n          the lessons to review to improve their score.',
    'Social interactions',
    'Another critical component of a successful course is student participation. Online office\n        hours and asking questions of the experts are some examples to elicit participation.',
    'For large online courses, the size of the audience means that it is impractical for the\n        course staff to answer all of the questions and to enter all of the discussions posed by\n        all of the students. Instead, you can set up avenues in which the students can\n        participate not just with the instructor but also with other students.',
    'The most common types of social interactions are:',
    '<a#1>Google Groups or other web\n          forum</a#1><br#2 />\n          A web forum is a great way to get your students to talk to each other. To facilitate\n          discussion, you can set up your forum with appropriate categories, to guide students to\n          likely places to read and to post questions on particular topics within your course.\n          When designing the content of your course, consider creating activities requesting that\n          students post answers to the forum. You can also use a forum to post material that you\n          do not want in the main body of your course, either because it is background material\n          for students who need a bit more help or more challenging questions for more advanced\n          students.',
    '<a#1>Google+ page or\n          blog</a#1><br#2 />\n          Use Google+ or your blog to share information that you want available to not just your\n          students, but to other people as well. While students can comment on your posts, these\n          formats are still primarily methods for instructors to push information out to the\n          students.',
    '<a#1>Google Hangout</a#1><br#2 />\n          You may decide that you want your students to divide into smaller groups to work on\n          projects together. Your students probably live in distributed areas. You can have them\n          meet in a Google Hangout to collaborate on their project.',
    '<a#1>Announcements-only\n          email alias</a#1><br#2 />\n          Throughout the course, you may want to send email to students, such as to remind them\n          of upcoming events.',
    'In addition to these things that you set up, students may create additional interaction\n        mechanisms, perhaps an email alias for students interested in a particular aspect of the\n        course material or weekly in-person meetings for students living close to each other.',
    'Administrative tasks',
    'Of course, as with any class there are various administrative aspects to creating an\n        online course. Two of the major ones are <a#1>managing student\n        registration</a#1> and <a#2>collecting and analyzing data\n        to see how well your course does</a#2>.',
    'For a full list of tasks needed to create a course, see the <a#1>Course Builder\n        Checklist</a#1>.',
    '<a#1>Google</a#1>',
    '<a#1>About Google</a#1>',
    '<a#1>Privacy & Terms</a#1>'
]
# pylint: enable=line-too-long
# Allow running this module's test suite directly from the command line.
if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper classes to implement caching."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import collections
import datetime
import logging
import sys
import threading
import unittest
import appengine_config
from models.counters import PerfCounter
def iter_all(query, batch_size=100):
    """Yields every result of a datastore query, fetching in batches.

    Re-issues the query from the last seen cursor until a full pass yields
    no entities, so arbitrarily large result sets can be traversed without
    holding them all in memory at once.

    Args:
        query: a query object supporting with_cursor()/run()/cursor().
        batch_size: number of entities requested per underlying fetch.

    Yields:
        each entity matched by the query, in query order.
    """
    cursor = None
    saw_results = True
    while saw_results:
        saw_results = False
        query = query.with_cursor(cursor)
        for entity in query.run(batch_size=batch_size):
            saw_results = True
            yield entity
        # Remember where this pass stopped so the next pass resumes there.
        cursor = query.cursor()
class AbstractScopedSingleton(object):
    """A singleton object bound to and managed by a container.

    This singleton stores its instance inside the container. When container is
    wiped, the singleton instance is garbage collected and destroyed. You can
    use a dict as a container and then wipe it yourself. You can use
    threading.local as a container and it will be wiped automatically when
    thread exits.
    """

    # Subclasses must set this to a dict-like object that holds instances.
    CONTAINER = None

    @classmethod
    def _instances(cls):
        """Returns (creating on first use) the {class: instance} map."""
        assert cls.CONTAINER is not None
        if 'instances' not in cls.CONTAINER:
            cls.CONTAINER['instances'] = {}
        return cls.CONTAINER['instances']

    @classmethod
    def instance(cls, *args, **kwargs):
        """Creates new or returns existing instance of the object.

        Args:
            *args: initializer arguments; must be identical on every call
                for a given class while its instance is alive.
            **kwargs: keyword initializer arguments; same restriction.

        Returns:
            The singleton instance of cls held by CONTAINER.

        Raises:
            AssertionError: if an instance already exists but was created
                with different arguments.
        """
        # pylint: disable=protected-access
        _instance = cls._instances().get(cls)
        # Compare against None explicitly; "if not _instance" would wrongly
        # re-create a singleton whose instance happens to be falsy.
        if _instance is None:
            try:
                _instance = cls(*args, **kwargs)
            except:
                logging.exception(
                    'Failed to instantiate %s: %s, %s', cls, args, kwargs)
                raise
            appengine_config.log_appstats_event('%s.create' % cls.__name__, {})
            # Remember the creation arguments so later calls can be validated.
            _instance._init_args = (args, kwargs)
            cls._instances()[cls] = _instance
        else:
            _before = _instance._init_args
            _now = (args, kwargs)
            if _now != _before:
                raise AssertionError(
                    'Singleton initiated with %s already exists. '
                    'Failed to re-initialize it with %s.' % (_before, _now))
        return _instance

    @classmethod
    def clear_all(cls):
        """Clear all active instances."""
        if cls._instances():
            # Iterate over a copy; clear() mutates the underlying map.
            for _instance in list(cls._instances().values()):
                _instance.clear()
            del cls.CONTAINER['instances']

    def clear(self):
        """Destroys this object and its content."""
        appengine_config.log_appstats_event(
            '%s.destroy' % self.__class__.__name__, {})
        _instance = self._instances().get(self.__class__)
        if _instance is not None:
            del self._instances()[self.__class__]
# Containers backing the two singleton scopes below. Instances stored in the
# plain dict live for the lifetime of the process; instances stored inside
# threading.local() are discarded automatically when their thread exits.
_process_scoped_singleton = {}
_request_scoped_singleton = threading.local()
class ProcessScopedSingleton(AbstractScopedSingleton):
    """A singleton object bound to the process."""

    # Module-level dict: shared by all threads and kept until the process
    # terminates or clear_all() wipes it.
    CONTAINER = _process_scoped_singleton
class RequestScopedSingleton(AbstractScopedSingleton):
    """A singleton object bound to the request scope."""

    # threading.local().__dict__ is distinct per thread, so each
    # request-serving thread gets its own isolated instance map.
    CONTAINER = _request_scoped_singleton.__dict__
class LRUCache(object):
    """A dict that supports capped size and LRU eviction of items."""

    def __init__(
        self, max_item_count=None,
        max_size_bytes=None, max_item_size_bytes=None):
        """Initializes the cache.

        Args:
            max_item_count: evict when the item count would exceed this.
            max_size_bytes: evict when the total byte size would reach this.
            max_item_size_bytes: reject any single item larger than this.

        At least one of max_item_count/max_size_bytes must be provided.
        """
        assert max_item_count or max_size_bytes
        if max_item_count:
            assert max_item_count > 0
        if max_size_bytes:
            assert max_size_bytes > 0
        # total_size is only maintained when max_size_bytes is set.
        self.total_size = 0
        self.max_item_count = max_item_count
        self.max_size_bytes = max_size_bytes
        self.max_item_size_bytes = max_item_size_bytes
        # Insertion order of the OrderedDict doubles as the eviction order;
        # _record_access() moves touched items to the back.
        self.items = collections.OrderedDict([])

    def get_entry_size(self, key, value):
        """Computes item size. Override and compute properly for your items."""
        return sys.getsizeof(key) + sys.getsizeof(value)

    def _compute_current_size(self):
        """Recomputes the total size from scratch (for verification)."""
        total = 0
        # Fixed: this previously called self.get_item_size(), a method that
        # does not exist, raising AttributeError whenever invoked; it also
        # used the Python-2-only iteritems().
        for key, item in self.items.items():
            total += self.get_entry_size(key, item)
        return total

    def _allocate_space(self, key, value):
        """Removes items in LRU order until size constraints are met.

        Returns:
            True if the new entry may be inserted, False if it can never fit.
        """
        entry_size = self.get_entry_size(key, value)
        if self.max_item_size_bytes and entry_size > self.max_item_size_bytes:
            return False
        while True:
            over_count = False
            over_size = False
            if self.max_item_count:
                over_count = len(self.items) >= self.max_item_count
            if self.max_size_bytes:
                over_size = self.total_size + entry_size >= self.max_size_bytes
            if not (over_count or over_size):
                if self.max_size_bytes:
                    self.total_size += entry_size
                    assert self.total_size < self.max_size_bytes
                return True
            if self.items:
                # popitem(last=False) removes the least recently used entry.
                _key, _value = self.items.popitem(last=False)
                if self.max_size_bytes:
                    self.total_size -= self.get_entry_size(_key, _value)
                    assert self.total_size >= 0
            else:
                break
        return False

    def _record_access(self, key):
        """Pop and re-add the item so it moves to the back of the LRU queue."""
        item = self.items.pop(key)
        self.items[key] = item

    def contains(self, key):
        """Checks if item is contained without accessing it."""
        assert key
        return key in self.items

    def put(self, key, value):
        """Inserts or replaces an item, evicting others to make room.

        Returns:
            True if the item was stored, False if it could not fit.
        """
        assert key
        # Fixed: remove any existing entry first so its size is not counted
        # twice and the key cannot be evicted to make room for its own
        # replacement.
        if key in self.items:
            self.delete(key)
        if self._allocate_space(key, value):
            self.items[key] = value
            return True
        return False

    def get(self, key):
        """Accessing item makes it less likely to be evicted.

        Returns:
            (True, value) on a hit, (False, None) on a miss.
        """
        assert key
        if key in self.items:
            self._record_access(key)
            return True, self.items[key]
        return False, None

    def delete(self, key):
        """Removes an item, releasing its accounted byte size.

        Returns:
            True if the item was present and removed, False otherwise.
        """
        assert key
        if key in self.items:
            if self.max_size_bytes:
                # Fixed: deleted entries' sizes were never subtracted, which
                # permanently shrank the cache's effective capacity.
                self.total_size -= self.get_entry_size(key, self.items[key])
            del self.items[key]
            return True
        return False
class NoopCacheConnection(object):
    """A cache connection that never stores anything.

    Substituted for a real connection when caching is disabled: writes and
    deletes are silently dropped, and every lookup is a miss.
    """

    def put(self, *unused_args, **unused_kwargs):
        """Discards the value; always returns None."""
        return None

    def get(self, *unused_args, **unused_kwargs):
        """Always reports a miss as (False, None)."""
        return False, None

    def delete(self, *unused_args, **unused_kwargs):
        """Does nothing; always returns None."""
        return None
class AbstractCacheEntry(object):
    """Base class for the in-cache representation of an object.

    Subclasses define how external objects convert to/from cached form and
    how staleness against an update notification is detected.
    """

    # Deletions are not tracked, so a deleted item may keep being served
    # from the cache for up to this many seconds before expiring.
    CACHE_ENTRY_TTL_SEC = 5 * 60

    @classmethod
    def internalize(cls, unused_key, *args, **kwargs):
        """Converts incoming objects into cache entry object."""
        return args, kwargs

    @classmethod
    def externalize(cls, unused_key, *args, **kwargs):
        """Converts cache entry into external object."""
        return args, kwargs

    def has_expired(self):
        """Whether this entry is older than CACHE_ENTRY_TTL_SEC.

        Relies on self.created_on (a UTC datetime) being set by the subclass.
        """
        lifetime = datetime.datetime.utcnow() - self.created_on
        return lifetime.total_seconds() > self.CACHE_ENTRY_TTL_SEC

    def is_up_to_date(self, unused_key, unused_update):
        """Compare entry and the update object to decide if entry is fresh."""
        raise NotImplementedError()

    def updated_on(self):
        """Return last update time for entity."""
        raise NotImplementedError()
class AbstractCacheConnection(object):
    """Base class for a namespaced, perf-counter-instrumented cache.

    Subclasses must set PERSISTENT_ENTITY to the datastore entity class
    backing the cache and CACHE_ENTRY to an AbstractCacheEntry subclass,
    implement is_enabled(), and create self.cache in their __init__.
    """

    PERSISTENT_ENTITY = None
    CACHE_ENTRY = None

    @classmethod
    def init_counters(cls):
        """Declares the per-class performance counters used by this cache."""
        name = cls.__name__
        cls.CACHE_RESYNC = PerfCounter(
            'gcb-models-%s-cache-resync' % name,
            'A number of times an vfs cache was updated.')
        cls.CACHE_PUT = PerfCounter(
            'gcb-models-%s-cache-put' % name,
            'A number of times an object was put into cache.')
        cls.CACHE_GET = PerfCounter(
            'gcb-models-%s-cache-get' % name,
            'A number of times an object was pulled from cache.')
        cls.CACHE_DELETE = PerfCounter(
            'gcb-models-%s-cache-delete' % name,
            'A number of times an object was deleted from cache.')
        cls.CACHE_HIT = PerfCounter(
            'gcb-models-%s-cache-hit' % name,
            'A number of times an object was found cache.')
        cls.CACHE_HIT_NONE = PerfCounter(
            'gcb-models-%s-cache-hit-none' % name,
            'A number of times an object was found cache, but it was None.')
        cls.CACHE_MISS = PerfCounter(
            'gcb-models-%s-cache-miss' % name,
            'A number of times an object was not found in the cache.')
        cls.CACHE_NOT_FOUND = PerfCounter(
            'gcb-models-%s-cache-not-found' % name,
            'A number of times an object was requested, but was not found in '
            'the cache or underlying provider.')
        cls.CACHE_UPDATE_COUNT = PerfCounter(
            'gcb-models-%s-cache-update-count' % name,
            'A number of update objects received.')
        cls.CACHE_EVICT = PerfCounter(
            'gcb-models-%s-cache-evict' % name,
            'A number of times an object was evicted from cache because it was '
            'changed.')
        cls.CACHE_EXPIRE = PerfCounter(
            'gcb-models-%s-cache-expire' % name,
            'A number of times an object has expired from cache because it was '
            'too old.')

    @classmethod
    def make_key_prefix(cls, ns):
        """Returns the cache key prefix for all entries in namespace ns."""
        return '%s:%s' % (cls.__name__, ns)

    @classmethod
    def make_key(cls, ns, entry_key):
        """Returns the fully qualified cache key for entry_key in ns."""
        return '%s:%s' % (cls.make_key_prefix(ns), entry_key)

    @classmethod
    def is_enabled(cls):
        """Whether caching is turned on; subclasses must implement."""
        raise NotImplementedError()

    @classmethod
    def new_connection(cls, *args, **kwargs):
        """Returns a connection, or a no-op one when caching is disabled."""
        if not cls.is_enabled():
            return NoopCacheConnection()
        conn = cls(*args, **kwargs)
        # pylint: disable=protected-access
        conn.apply_updates(conn._get_incremental_updates())
        return conn

    def __init__(self, namespace):
        """Override this method and properly instantiate self.cache."""
        self.namespace = namespace
        self.cache = None
        appengine_config.log_appstats_event(
            '%s.connect' % self.__class__.__name__, {'namespace': namespace})

    def apply_updates(self, updates):
        """Applies a list of global changes to the local cache.

        Cached entries that are stale or expired relative to their update
        object are evicted; fresh entries are left untouched.

        Args:
            updates: dict of {key: update_entity} describing recent changes.
        """
        self.CACHE_RESYNC.inc()
        # .items() instead of the Python-2-only .iteritems() keeps this
        # working under both Python 2 and Python 3.
        for key, update in updates.items():
            _key = self.make_key(self.namespace, key)
            found, entry = self.cache.get(_key)
            if not found:
                continue
            if entry is None:
                self.CACHE_EVICT.inc()
                self.cache.delete(_key)
                continue
            if not entry.is_up_to_date(key, update):
                self.CACHE_EVICT.inc()
                self.cache.delete(_key)
                continue
            if entry.has_expired():
                self.CACHE_EXPIRE.inc()
                self.cache.delete(_key)
                continue

    def _get_most_recent_updated_on(self):
        """Get the most recent item cached. Datastore deletions are missed..."""
        has_items = False
        max_updated_on = datetime.datetime.fromtimestamp(0)
        prefix = self.make_key_prefix(self.namespace)
        # Scan only entries belonging to this namespace; .items() for
        # Python 2/3 compatibility.
        for key, entry in self.cache.items.items():
            if not key.startswith(prefix):
                continue
            has_items = True
            if not entry:
                continue
            updated_on = entry.updated_on()
            if not updated_on:  # old entities may be missing this field
                updated_on = datetime.datetime.fromtimestamp(0)
            if updated_on > max_updated_on:
                max_updated_on = updated_on
        return has_items, max_updated_on

    def get_updates_when_empty(self):
        """Override this method to pre-load cache when it's completely empty."""
        return {}

    def _get_incremental_updates(self):
        """Gets a list of global changes older than the most recent item cached.

        WARNING!!! We fetch the updates since the timestamp of the oldest item
        we have cached so far. This will bring all objects that have changed or
        were created since that time.

        This will NOT bring the notifications about object deletions. Thus cache
        will continue to serve deleted objects until they expire.

        Returns:
            an dict of {key: update} objects that represent recent updates
        """
        has_items, updated_on = self._get_most_recent_updated_on()
        if not has_items:
            return self.get_updates_when_empty()
        q = self.PERSISTENT_ENTITY.all()
        if updated_on:
            q.filter('updated_on > ', updated_on)
        result = {
            entity.key().name(): entity for entity in iter_all(q)}
        # len(result) rather than len(result.keys()): same value, no
        # intermediate list under Python 2.
        self.CACHE_UPDATE_COUNT.inc(len(result))
        return result

    def put(self, key, *args):
        """Internalizes the object(s) and stores them under key."""
        self.CACHE_PUT.inc()
        self.cache.put(
            self.make_key(self.namespace, key),
            self.CACHE_ENTRY.internalize(key, *args))

    def get(self, key):
        """Looks up key; returns (found, value), expiring stale entries."""
        self.CACHE_GET.inc()
        _key = self.make_key(self.namespace, key)
        found, entry = self.cache.get(_key)
        if not found:
            self.CACHE_MISS.inc()
            return False, None
        if not entry:
            self.CACHE_HIT_NONE.inc()
            return True, None
        if entry.has_expired():
            self.CACHE_EXPIRE.inc()
            self.cache.delete(_key)
            return False, None
        self.CACHE_HIT.inc()
        return True, self.CACHE_ENTRY.externalize(key, entry)

    def delete(self, key):
        """Removes key from the local cache (not from the datastore)."""
        self.CACHE_DELETE.inc()
        self.cache.delete(self.make_key(self.namespace, key))
class LRUCacheTests(unittest.TestCase):
    """Unit tests for LRUCache eviction order and size accounting."""

    def test_ordereddict_works(self):
        # Sanity check of the OrderedDict contract LRUCache relies on:
        # popitem(last=False) pops the oldest entry, last=True the newest.
        _dict = collections.OrderedDict([])
        _dict['a'] = '1'
        _dict['b'] = '2'
        _dict['c'] = '3'
        self.assertEqual(('a', '1'), _dict.popitem(last=False))
        self.assertEqual(('c', '3'), _dict.popitem(last=True))

    def test_initialization(self):
        # At least one limit must be supplied, and limits must be positive.
        with self.assertRaises(AssertionError):
            LRUCache()
        with self.assertRaises(AssertionError):
            LRUCache(max_item_count=-1)
        with self.assertRaises(AssertionError):
            LRUCache(max_size_bytes=-1)
        LRUCache(max_item_count=1)
        LRUCache(max_size_bytes=1)

    def test_evict_by_count(self):
        cache = LRUCache(max_item_count=3)
        self.assertTrue(cache.put('a', '1'))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        # contains() does not refresh 'a', so it stays the oldest entry...
        self.assertTrue(cache.contains('a'))
        # ...and inserting a fourth item evicts it.
        self.assertTrue(cache.put('d', '4'))
        self.assertFalse(cache.contains('a'))
        self.assertEquals(cache.get('a'), (False, None))

    def test_evict_by_count_lru(self):
        cache = LRUCache(max_item_count=3)
        self.assertTrue(cache.put('a', '1'))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        # get() refreshes 'a', making 'b' the least recently used entry.
        self.assertEquals(cache.get('a'), (True, '1'))
        self.assertTrue(cache.put('d', '4'))
        self.assertTrue(cache.contains('a'))
        self.assertFalse(cache.contains('b'))

    def test_evict_by_size(self):
        # Budget: empty-OrderedDict overhead plus roughly three small items.
        min_size = sys.getsizeof(LRUCache(max_item_count=1).items)
        item_size = sys.getsizeof('a1')
        cache = LRUCache(max_size_bytes=min_size + 3 * item_size)
        self.assertTrue(cache.put('a', '1'))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        # A 1000-byte value can never fit this budget, even after evicting
        # everything else.
        self.assertFalse(cache.put('d', bytearray(1000)))

    def test_evict_by_size_lru(self):
        cache = LRUCache(max_size_bytes=5000)
        self.assertTrue(cache.put('a', bytearray(4500)))
        self.assertTrue(cache.put('b', '2'))
        self.assertTrue(cache.put('c', '3'))
        self.assertTrue(cache.contains('a'))
        # Inserting another large item evicts the big, oldest entry 'a'
        # while the small, newer entries survive.
        self.assertTrue(cache.put('d', bytearray(1000)))
        self.assertFalse(cache.contains('a'))
        self.assertTrue(cache.contains('b'))

    def test_max_item_size(self):
        cache = LRUCache(max_size_bytes=5000, max_item_size_bytes=1000)
        # Items above max_item_size_bytes are rejected outright, not evicted
        # into.
        self.assertFalse(cache.put('a', bytearray(4500)))
        self.assertEquals(cache.get('a'), (False, None))
        self.assertTrue(cache.put('a', bytearray(500)))
        found, _ = cache.get('a')
        self.assertTrue(found)
class SingletonTests(unittest.TestCase):
    """Unit tests for RequestScopedSingleton lifecycle management."""

    def test_singleton(self):
        class A(RequestScopedSingleton):
            def __init__(self, data):
                self.data = data

        class B(RequestScopedSingleton):
            def __init__(self, data):
                self.data = data

        # TODO(psimakov): prevent direct instantiation
        A('aaa')
        B('bbb')

        # using instance() creates and returns the same instance
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = A.instance('bar')
        self.assertEqual(a.data, 'bar')
        self.assertEqual(b.data, 'bar')
        self.assertIs(a, b)

        # re-initialization fails if arguments differ; use assertRaises
        # rather than the fragile try/raise/except pattern.
        RequestScopedSingleton.clear_all()
        a = A.instance('dog')
        with self.assertRaises(AssertionError):
            A.instance('cat')

        # clearing one keeps others
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = B.instance('cat')
        a.clear()
        c = B.instance('cat')
        self.assertIs(c, b)

        # clearing all clears all
        RequestScopedSingleton.clear_all()
        a = A.instance('bar')
        b = B.instance('cat')
        RequestScopedSingleton.clear_all()
        c = A.instance('bar')
        d = B.instance('cat')
        self.assertIsNot(a, c)
        self.assertIsNot(b, d)
def run_all_unit_tests():
    """Runs all unit tests in this module."""
    loader = unittest.TestLoader()
    all_suites = [
        loader.loadTestsFromTestCase(case)
        for case in (LRUCacheTests, SingletonTests)]
    unittest.TextTestRunner().run(unittest.TestSuite(all_suites))
# Allow running this module's unit tests directly from the command line.
if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mapping from schema to backend properties."""
__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'
import collections
import copy
import json
class Property(object):
    """A single named, typed attribute within a schema definition."""

    def __init__(
        self, name, label, property_type, select_data=None, description=None,
        optional=False, extra_schema_dict_values=None):
        """Initializes the property.

        Raises:
            ValueError: if name is the reserved word 'properties'.
        """
        if name == 'properties':
            raise ValueError('Cannot name a field "properties"; this conflicts '
                             'with the use of "properties" in generating JSON '
                             'schema dictionaries.')
        self._name = name
        self._label = label
        self._property_type = property_type
        self._select_data = select_data
        self._description = description
        self._optional = optional
        if extra_schema_dict_values:
            self._extra_schema_dict_values = extra_schema_dict_values
        else:
            self._extra_schema_dict_values = {}

    def __str__(self):
        return '%s#%s' % (self._name, self._property_type)

    @property
    def name(self):
        return self._name

    @property
    def label(self):
        return self._label

    @property
    def type(self):
        return self._property_type

    @property
    def description(self):
        return self._description

    @property
    def extra_schema_dict_values(self):
        return self._extra_schema_dict_values

    def set_select_data(self, select_data):
        """Replaces the (value, label) choices for this property."""
        self._select_data = select_data

    def get_display_dict(self):
        """Returns the minimal dict used to render this property."""
        display = {}
        display['name'] = self._name
        display['label'] = self._label
        display['repeated'] = False
        display['description'] = self._description
        return display
class Registry(object):
    """Registry is a named, hierarchical collection of Property's."""

    def __init__(self, title, description=None, extra_schema_dict_values=None):
        self._title = title
        self._registry = {'id': title, 'type': 'object'}
        self._description = description
        if description:
            self._registry['description'] = description
        self._extra_schema_dict_values = extra_schema_dict_values
        self._properties = []
        # Ordered so generated schemas list sub-registries deterministically.
        self._sub_registries = collections.OrderedDict()

    @property
    def title(self):
        return self._title

    @property
    def sub_registries(self):
        return self._sub_registries

    def add_property(self, schema_field):
        """Add a Property to this Registry."""
        self._properties.append(schema_field)

    def get_property(self, property_name):
        """Returns the first property with the given name, or None."""
        for prop in self._properties:
            if prop.name == property_name:
                return prop
        return None

    def get_sub_registry(self, sub_registry_name):
        return self._sub_registries.get(sub_registry_name)

    def remove_property(self, property_name):
        """Removes and returns the named property; None if not present."""
        prop = self.get_property(property_name)
        if prop:
            return self._properties.pop(self._properties.index(prop))

    def add_sub_registry(
        self, name, title=None, description=None, registry=None):
        """Add a sub registry to for this Registry."""
        if not registry:
            registry = Registry(title, description)
        self._sub_registries[name] = registry
        return registry

    def has_subregistries(self):
        return bool(self._sub_registries)

    def get_display_dict(self):
        return {
            'title': self._title,
            'properties': [p.get_display_dict() for p in self._properties],
            'registries': [r.get_display_dict()
                           for r in self._sub_registries.values()],
        }

    def clone_only_items_named(self, paths):
        """Clone only the selected items from a registry.

        Args:
            paths: Each item is a path into the schema, with slashes as
                separators.  E.g., "foo" matches things at the top level
                named "foo"; "foo/bar/baz" looks in sub-schema "foo" for a
                sub-schema "bar", and within that, "baz".  The returned
                schema includes the whole chain of enclosing sub-registries
                down to the leaf.  Note that a name may match both a
                sub-schema and a property, and that colons in names are not
                special here (e.g.
                "registration/course:send_welcome_notifications").
        Returns:
            A schema with only the named items present.
        """
        # Arbitrary depth instantiate-on-reference dict constructor
        treebuilder = lambda: collections.defaultdict(treebuilder)

        # Build a tree of nodes from the given paths.
        root = treebuilder()
        for path in paths:
            parts = path.split('/')
            node = root
            for part in parts:
                node = node[part]
        registry = copy.deepcopy(self)

        def delete_all_but(registry, node):
            # pylint: disable=protected-access
            # Copy so deleting does not wreck iterator.
            for prop in copy.copy(registry._properties):
                if prop.name not in node:
                    registry._properties.remove(prop)
            # Bug fix: snapshot the items before iterating.  The previous
            # code iterated iteritems() while deleting entries, which
            # raises "dictionary changed size during iteration".
            for name, value in list(registry._sub_registries.items()):
                # If this subregistry is not named at all, remove it.
                if name not in node:
                    del registry._sub_registries[name]
                # If the paths-to-save gives sub-entries within this
                # node, then proceed into the node to prune its members.
                # Otherwise, do nothing, leaving the node and all its
                # children in place.
                elif node[name]:
                    delete_all_but(value, node[name])
        delete_all_but(registry, root)
        return registry
class SchemaField(Property):
    """SchemaField defines a simple field."""

    def __init__(
        self, name, label, property_type, select_data=None, description=None,
        optional=False, hidden=False, editable=True, i18n=None,
        extra_schema_dict_values=None, validator=None):
        """Creates a simple field; validator is an optional callable
        taking (value, errors)."""
        Property.__init__(
            self, name, label, property_type, select_data=select_data,
            description=description, optional=optional,
            extra_schema_dict_values=extra_schema_dict_values)
        self._hidden = hidden
        self._editable = editable
        self._validator = validator
        self._i18n = i18n

    @property
    def hidden(self):
        return self._hidden

    @property
    def editable(self):
        return self._editable

    @property
    def i18n(self):
        return self._i18n

    def get_json_schema_dict(self):
        """Get the JSON schema for this field."""
        prop = {}
        prop['type'] = self._property_type
        if self._optional:
            prop['optional'] = self._optional
        if self._description:
            prop['description'] = self._description
        if self._i18n:
            prop['i18n'] = self._i18n
        return prop

    def _get_schema_dict(self, prefix_key):
        """Get Schema annotation dictionary for this field."""
        if self._extra_schema_dict_values:
            # NOTE(review): this aliases (does not copy) the shared dict, so
            # annotations added below persist on the field between calls --
            # confirm callers do not rely on a pristine dict.
            schema = self._extra_schema_dict_values
        else:
            schema = {}
        schema['label'] = self._label
        if self._hidden:
            schema['_type'] = 'hidden'
        elif not self._editable:
            schema['_type'] = 'uneditable'
        elif self._select_data and '_type' not in schema:
            schema['_type'] = 'select'
        # Bug fix: compare with '==' rather than 'is'.  Identity of equal
        # string literals is a CPython interning accident, not a guarantee.
        if self._property_type == 'date':
            if 'dateFormat' not in schema:
                schema['dateFormat'] = 'Y/m/d'
            if 'valueFormat' not in schema:
                schema['valueFormat'] = 'Y/m/d'
        elif self._select_data:
            choices = []
            for value, label in self._select_data:
                choices.append(
                    {'value': value, 'label': unicode(label)})
            schema['choices'] = choices
        if self._description:
            schema['description'] = self._description
        return [(prefix_key + ['_inputex'], schema)]

    def validate(self, value, errors):
        """Applies the optional validator callback to value."""
        if self._validator:
            self._validator(value, errors)
class FieldArray(SchemaField):
    """An 'array' field whose entries all conform to one item schema."""

    def __init__(
        self, name, label, description=None, item_type=None,
        optional=False, extra_schema_dict_values=None):
        """Creates the array field; item_type describes a single element."""
        super(FieldArray, self).__init__(
            name, label, 'array', description=description, optional=optional,
            extra_schema_dict_values=extra_schema_dict_values)
        self._item_type = item_type

    @property
    def item_type(self):
        """Schema of a single array element."""
        return self._item_type

    def get_json_schema_dict(self):
        """Extends the base JSON schema with the per-item schema."""
        schema = super(FieldArray, self).get_json_schema_dict()
        schema['items'] = self._item_type.get_json_schema_dict()
        return schema

    def _get_schema_dict(self, prefix_key):
        """Appends the item schema annotations under the 'items' key."""
        entries = super(FieldArray, self)._get_schema_dict(prefix_key)
        # pylint: disable=protected-access
        entries = entries + self._item_type._get_schema_dict(
            prefix_key + ['items'])
        # pylint: enable=protected-access
        return entries

    def get_display_dict(self):
        """Marks the display entry as repeated and attaches the item schema."""
        display = super(FieldArray, self).get_display_dict()
        display['repeated'] = True
        display['item_type'] = self.item_type.get_display_dict()
        return display
class FieldRegistry(Registry):
    """FieldRegistry is an object with SchemaField properties."""

    def add_sub_registry(
        self, name, title=None, description=None, registry=None):
        """Add a sub registry to for this Registry."""
        if not registry:
            registry = FieldRegistry(title, description=description)
        self._sub_registries[name] = registry
        return registry

    def get_json_schema_dict(self):
        """Builds the JSON-schema dict for this registry and its children."""
        schema_dict = dict(self._registry)
        schema_dict['properties'] = collections.OrderedDict()
        for schema_field in self._properties:
            schema_dict['properties'][schema_field.name] = (
                schema_field.get_json_schema_dict())
        for key in self._sub_registries.keys():
            schema_dict['properties'][key] = (
                self._sub_registries[key].get_json_schema_dict())
        return schema_dict

    def get_json_schema(self):
        """Get the json schema for this API."""
        return json.dumps(self.get_json_schema_dict())

    def _get_schema_dict(self, prefix_key):
        """Get schema dict for this API."""
        title_key = list(prefix_key)
        title_key.append('title')
        schema_dict = [(title_key, self._title)]
        if self._extra_schema_dict_values:
            key = list(prefix_key)
            key.append('_inputex')
            schema_dict.append([key, self._extra_schema_dict_values])
        base_key = list(prefix_key)
        base_key.append('properties')
        # pylint: disable=protected-access
        for schema_field in self._properties:
            key = base_key + [schema_field.name]
            schema_dict += schema_field._get_schema_dict(key)
        # pylint: enable=protected-access
        for key in self._sub_registries.keys():
            sub_registry_key_prefix = list(base_key)
            sub_registry_key_prefix.append(key)
            sub_registry = self._sub_registries[key]
            # pylint: disable=protected-access
            for entry in sub_registry._get_schema_dict(sub_registry_key_prefix):
                schema_dict.append(entry)
            # pylint: enable=protected-access
        return schema_dict

    def get_schema_dict(self):
        """Get schema dict for this API."""
        return self._get_schema_dict(list())

    @classmethod
    def _add_entry(cls, key_part_list, value, entity):
        """Stores value into nested dicts; key_part_list is reversed order."""
        if len(key_part_list) == 1:
            entity[key_part_list[0]] = value
            return
        key = key_part_list.pop()
        # 'in' instead of the Python-2-only dict.has_key(); isinstance
        # instead of exact type comparison (also accepts dict subclasses
        # such as OrderedDict).
        if key not in entity:
            entity[key] = {}
        else:
            assert isinstance(entity[key], dict)
        cls._add_entry(key_part_list, value, entity[key])

    @classmethod
    def convert_json_to_entity(cls, json_entry, entity):
        """Unflattens colon-separated JSON keys into a nested entity dict."""
        assert isinstance(json_entry, dict)
        for key in json_entry.keys():
            if isinstance(json_entry[key], dict):
                cls.convert_json_to_entity(json_entry[key], entity)
            else:
                key_parts = key.split(':')
                key_parts.reverse()
                cls._add_entry(key_parts, json_entry[key], entity)

    @classmethod
    def _get_field_name_parts(cls, field_name):
        """Splits 'a:b:c' into reversed parts ready for _get_field_value."""
        field_name_parts = field_name.split(':')
        field_name_parts.reverse()
        return field_name_parts

    @classmethod
    def _get_field_value(cls, key_part_list, entity):
        """Looks up a value in a nested entity dict; None when missing."""
        if len(key_part_list) == 1:
            if isinstance(entity, dict) and key_part_list[0] in entity:
                return entity[key_part_list[0]]
            return None
        key = key_part_list.pop()
        # Guard the intermediate levels too, so a shallower-than-schema
        # entity yields None rather than raising.
        if isinstance(entity, dict) and key in entity:
            return cls._get_field_value(key_part_list, entity[key])
        return None

    def convert_entity_to_json_entity(self, entity, json_entry):
        """Populates json_entry from entity using this registry's fields."""
        for schema_field in self._properties:
            field_name = schema_field.name
            field_name_parts = self._get_field_name_parts(field_name)
            value = self._get_field_value(field_name_parts, entity)
            if value is not None:
                json_entry[field_name] = value
        for key in self._sub_registries.keys():
            json_entry[key] = {}
            self._sub_registries[key].convert_entity_to_json_entity(
                entity, json_entry[key])

    def validate(self, payload, errors):
        """Runs every field validator against the payload, recursively."""
        for schema_field in self._properties:
            field_name_parts = self._get_field_name_parts(schema_field.name)
            value = self._get_field_value(field_name_parts, payload)
            schema_field.validate(value, errors)
        for registry in self._sub_registries.values():
            registry.validate(payload, errors)

    @classmethod
    def is_complex_name(cls, name):
        """True when the name encodes a nested path ('a:b')."""
        return ':' in name

    @classmethod
    def compute_name(cls, parent_names):
        """Computes non-indexed and indexed entity name given parent names."""
        parts = []
        for parent_name in parent_names:
            # Collapse concrete array indices ('[0]') to the generic '[]'.
            if parent_name[0] == '[' and parent_name[-1] == ']':
                parts.append('[]')
            else:
                parts.append(parent_name)
        return ':'.join(parts), ':'.join(parent_names)
class SchemaFieldValue(object):
    """A field value paired with its schema type and a write-back hook."""

    def __init__(self, name, field, value, setter):
        """Captures the binding.

        Args:
            name: fully-qualified name of the value.
            field: SchemaField object that holds the type.
            value: Python object that holds the value.
            setter: one-argument callable that stores a new value into the
                underlying data structure.
        """
        self._field_name = name
        self._schema_field = field
        self._current_value = value
        self._write_back = setter

    @property
    def name(self):
        """Fully-qualified name of this value."""
        return self._field_name

    @property
    def field(self):
        """SchemaField carrying the type information."""
        return self._schema_field

    @property
    def value(self):
        """Current value."""
        return self._current_value

    @value.setter
    def value(self, new_value):
        # Keep the cached copy and the backing structure in sync.
        self._current_value = new_value
        self._write_back(new_value)
class FieldRegistryIndex(object):
    """Helper class that allows fast access to values and their fields."""

    def __init__(self, registry):
        self._registry = registry
        self._names_in_order = []
        self._complex_name_to_field = {}
        self._computed_name_to_field = {}

    @property
    def registry(self):
        return self._registry

    @property
    def names_in_order(self):
        return self._names_in_order

    def _inspect_registry(self, parent_names, registry):
        """Recursively walks registry, indexing each field by its name."""
        # pylint: disable=protected-access
        for field in registry._properties:
            if registry.is_complex_name(field.name):
                # Colon-qualified names index as-is, ignoring the prefix.
                key = field.name
                lookup = self._complex_name_to_field
                child_prefix = [key, '[]']
            else:
                key = ':'.join(parent_names + [field.name])
                lookup = self._computed_name_to_field
                child_prefix = parent_names + [field.name, '[]']
            if key in lookup:
                raise KeyError('Field already defined: %s.' % key)
            if isinstance(field, FieldArray):
                # Index the array's element schema under the '[]' marker.
                self._inspect_registry(child_prefix, field.item_type)
            lookup[key] = field
            self._names_in_order.append(key)
        for child_name, child in registry._sub_registries.items():
            self._inspect_registry(parent_names + [child_name], child)

    def rebuild(self):
        """Build an index."""
        self._inspect_registry([], self._registry)

    def find(self, name):
        """Finds and returns a field given field name."""
        hit = self._complex_name_to_field.get(name)
        if hit:
            return hit
        return self._computed_name_to_field.get(name)
class FieldFilter(object):
    """Filter for collections of schema fields."""

    def __init__(
        self, type_names=None, hidden_values=None, i18n_values=None,
        editable_values=None):
        """Each criterion is an allowed-values collection, or None to skip."""
        self._type_names = type_names
        self._hidden_values = hidden_values
        self._i18n_values = i18n_values
        self._editable_values = editable_values

    def _matches(self, field):
        """True when the field passes every configured criterion."""
        if self._type_names and field.type not in self._type_names:
            return False
        if self._hidden_values and field.hidden not in self._hidden_values:
            return False
        if self._editable_values and (
                field.editable not in self._editable_values):
            return False
        if self._i18n_values and field.i18n not in self._i18n_values:
            return False
        return True

    def _filter(self, named_field_list):
        """Filters a list of (name, SchemaField) pairs; returns a name set."""
        return set(
            field_name for field_name, field in named_field_list
            if self._matches(field))

    def filter_value_to_type_binding(self, binding):
        """Returns a set of value names that pass the criterion."""
        named_field_list = [
            (field_value.name, field_value.field)
            for field_value in binding.value_list]
        return self._filter(named_field_list)

    def filter_field_registry_index(self, index):
        """Returns the field names in the schema that pass the criterion."""
        named_field_list = [
            (name, index.find(name)) for name in index.names_in_order]
        return self._filter(named_field_list)
class ValueToTypeBinding(object):
    """This class provides mapping of entity attributes to their types."""

    def __init__(self):
        self.value_list = []  # a list of all encountered SchemaFieldValues
        self.name_to_value = {}  # field name to SchemaFieldValue mapping
        self.name_to_field = {}  # field name to SchemaField mapping
        self.unmapped_names = set()  # a set of field names where mapping failed
        self.index = None  # the indexed set of schema names

    def find_value(self, name):
        # Raises KeyError if the name was never bound.
        return self.name_to_value[name]

    def find_field(self, name):
        # Raises KeyError if the name was never bound.
        return self.name_to_field[name]

    @classmethod
    def _get_setter(cls, entity, key):
        """Returns a closure that writes back into entity[key].

        Used so that assigning SchemaFieldValue.value mutates the original
        decomposed structure.
        """
        def setter(value):
            entity[key] = value
        return setter

    @classmethod
    def _visit_dict(cls, index, parent_names, entity, binding):
        """Visit dict entity."""
        # Recurse into each entry; the entry name extends the name path.
        for _name, _value in entity.items():
            cls._decompose_entity(
                index, parent_names + [_name], _value, binding,
                cls._get_setter(entity, _name))

    @classmethod
    def _visit_list(cls, index, parent_names, entity, binding, setter):
        """Visit list entity."""
        # compute_name() returns the generic name (indices collapsed to
        # '[]', used for schema lookup) and the concrete indexed name.
        name_no_index, name = index.registry.compute_name(parent_names)
        _field = index.find(name_no_index)
        if _field:
            assert isinstance(_field, FieldArray)
            assert name not in binding.name_to_field
            binding.name_to_field[name] = _field
            assert name not in binding.name_to_value, name
            # NOTE(review): the array's SchemaFieldValue is registered in
            # name_to_value but not appended to value_list -- presumably
            # value_list is meant to hold only leaf attributes; confirm.
            binding.name_to_value[name] = SchemaFieldValue(
                name, _field, entity, setter)
            for _index, _item in enumerate(entity):
                _item_name = '[%s]' % _index
                cls._decompose_entity(
                    index, parent_names + [_item_name], _item, binding,
                    cls._get_setter(entity, _index))
        else:
            # No schema entry for this list; remember the failure.
            assert name not in binding.unmapped_names
            binding.unmapped_names.add(name)

    @classmethod
    def _visit_attribute(cls, index, parent_names, entity, binding, setter):
        """Visit simple attribute."""
        name_no_index, name = index.registry.compute_name(parent_names)
        _field = index.find(name_no_index)
        if _field:
            _value = SchemaFieldValue(name, _field, entity, setter)
            binding.value_list.append(_value)
            assert name not in binding.name_to_value, name
            binding.name_to_value[name] = _value
            assert name not in binding.name_to_field
            binding.name_to_field[name] = _field
        else:
            assert name not in binding.unmapped_names, name
            binding.unmapped_names.add(name)

    @classmethod
    def _decompose_entity(
        cls, index, parent_names, entity, binding, setter):
        """Recursively decomposes entity."""
        # Dispatch on the runtime shape of the entity.
        if isinstance(entity, dict):
            cls._visit_dict(index, parent_names, entity, binding)
        elif isinstance(entity, list):
            cls._visit_list(index, parent_names, entity, binding, setter)
        else:
            cls._visit_attribute(index, parent_names, entity, binding, setter)

    @classmethod
    def bind_entity_to_schema(cls, json_dumpable_entity, registry):
        """Connects schema field type information to the entity attributes.

        Args:
            json_dumpable_entity: a Python dict recursively containing other
                dict, list and primitive objects
            registry: a FieldRegistry that holds entity type information
        Returns:
            an instance of ValueToTypeBinding object that maps entity attributes
            to their types
        """
        binding = ValueToTypeBinding()
        index = FieldRegistryIndex(registry)
        index.rebuild()
        cls._decompose_entity(
            index, [], json_dumpable_entity, binding, None)
        binding.index = index
        return binding
| Python |
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module providing simplistic logger."""
__author__ = 'Mike Gainer (mgainer@google.com)'
import datetime
import logging
import traceback
import appengine_config
_LOG_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'  # ISO-8601 with microseconds

# Severity labels recorded with each accumulated message.
_CRITICAL = 'critical'
_WARNING = 'warning'
_INFO = 'info'


class CatchAndLog(object):
    """Simplistic logger allowing WebApp handlers to note errors for consumers.

    During processing of a request, there may be recoverable errors and other
    noteworthy events.  This logger allows components to simply note these so
    that they can be reported, rather than having to report only the first
    problem, or trying to encode multiple events into a single HTTP response
    code.
    """

    class _Catcher(object):
        """Automatically note thrown exceptions as log messages."""

        def __init__(self, log, consume_exceptions, message):
            self._log = log
            self._consume_exceptions = consume_exceptions
            self._message = message

        def __enter__(self):
            return self

        def __exit__(self, ex_type, value, tb):
            if ex_type:
                # Report only the innermost frame, with the source tree
                # root stripped to keep the path readable.
                frame_tuple = list(traceback.extract_tb(tb)[-1])
                frame_tuple[0] = frame_tuple[0].replace(
                    appengine_config.CODE_ROOT, '')
                exception_message = (
                    '%s: %s: %s' %
                    (self._message, ex_type.__name__, str(value)))
                if not appengine_config.PRODUCTION_MODE:
                    # Only expose source locations outside production.
                    exception_message += (
                        ' at %s' % traceback.format_list([frame_tuple])[0])
                self._log.critical(exception_message)
            # Returning True from __exit__ suppresses the exception.
            return self._consume_exceptions

    def __init__(self):
        self._messages = []

    def consume_exceptions(self, message):
        """Convert exceptions into 'critical' log messages.

        This is a convenience function for use in contexts where exceptions
        may be raised, but are not fatal and should not propagate.  Usage:

            with log.consume_exceptions("Arming mouse trap"):
                mouse_trap.set_bait('Wensleydale')
                mouse_trap.set_closing_force('critical personal injury')
                mouse_trap.arm()

        Args:
            message: Prepended to exception messages to give more context,
                so "OutOfCheeseException: Can't open pantry!" becomes
                "Arming mouse trap: OutOfCheeseException: Can't open pantry!"
        Returns:
            A context manager for use in a 'with' statement.
        """
        return CatchAndLog._Catcher(
            self, consume_exceptions=True, message=message)

    def propagate_exceptions(self, message):
        """Log exceptions as 'critical' log messages, and propagate them.

        See consume_exceptions() for usage.

        Args:
            message: Prepended to exception messages to give more context.
        Returns:
            A context manager for use in a 'with' statement.
        """
        return CatchAndLog._Catcher(
            self, consume_exceptions=False, message=message)

    def _log(self, level, message):
        # Timestamp each message so consumers can order/correlate events.
        self._messages.append({
            'message': message,
            'level': level,
            'timestamp': datetime.datetime.now().strftime(_LOG_DATE_FORMAT)})

    def critical(self, message):
        self._log(_CRITICAL, message)
        logging.critical(message)

    def warning(self, message):
        self._log(_WARNING, message)
        logging.warning(message)

    def warn(self, message):
        """Deprecated alias; delegates to warning() instead of duplicating it."""
        self.warning(message)

    def info(self, message):
        self._log(_INFO, message)
        logging.info(message)

    def get(self):
        """Returns the accumulated message dicts, oldest first."""
        return self._messages
| Python |
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=None):
    """Converts '&', '<', '>', '"' and "'" to their HTML entity forms.

    RFC 1866 specifies that '<', '>' and '&' be represented in HTML as
    &lt;, &gt; and &amp; respectively.  The ampersand must be replaced
    first so already-produced entities are not re-escaped.

    Bug fix: the replacement targets had been stripped down to no-op
    identity replacements (e.g. '&' -> '&'); the proper entities are
    restored here.  Also, the old default of string.replace was removed
    in Python 3; passing None now falls back to str.replace.

    Args:
        text: the string to escape.
        replace: optional replacement function with the signature
            replace(s, old, new) -> str.
    """
    if replace is None:
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;')  # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text
# The FCKeditor class
class FCKeditor(object):
    """Server-side helper that renders an FCKeditor instance as HTML."""

    def __init__(self, instanceName):
        self.InstanceName = instanceName
        self.BasePath = '/fckeditor/'
        self.Width = '100%'
        self.Height = '200'
        self.ToolbarSet = 'Default'
        self.Value = ''
        self.Config = {}

    def Create(self):
        """Alias for CreateHtml()."""
        return self.CreateHtml()

    def CreateHtml(self):
        """Returns the editor markup: an iframe for capable browsers, or a
        plain textarea fallback otherwise."""
        HtmlValue = escape(self.Value)
        Html = ""
        if self.IsCompatible():
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if self.ToolbarSet is not None:
                # Entity-encoded ampersand: the link lands in an HTML
                # attribute (matches upstream fckeditor.py).
                Link += "&amp;Toolbar=%s" % self.ToolbarSet
            # Render the linked hidden field
            Html += ('<input type="hidden" id="%s" name="%s" value="%s" '
                     'style="display:none" />' % (
                         self.InstanceName,
                         self.InstanceName,
                         HtmlValue))
            # Render the configurations hidden field
            Html += ('<input type="hidden" id="%s___Config" value="%s" '
                     'style="display:none" />' % (
                         self.InstanceName,
                         self.GetConfigFieldString()))
            # Render the editor iframe.  Bug fix: the frame id is
            # "<name>___Frame" (three underscores, per upstream); the old
            # string had a corrupted backslash-underscore sequence.
            Html += ('<iframe id="%s___Frame" src="%s" width="%s" '
                     'height="%s" frameborder="0" scrolling="no"></iframe>' % (
                         self.InstanceName,
                         Link,
                         self.Width,
                         self.Height))
        else:
            # Bug fix: a relative size contains a single "%" (e.g. "100%");
            # the old code searched for "%%", which never matches, so every
            # width/height was treated as pixels.
            if self.Width.find("%") < 0:
                WidthCSS = "%spx" % self.Width
            else:
                WidthCSS = self.Width
            if self.Height.find("%") < 0:
                HeightCSS = "%spx" % self.Height
            else:
                HeightCSS = self.Height
            Html += ('<textarea name="%s" rows="4" cols="40" '
                     'style="width: %s; height: %s;" wrap="virtual">'
                     '%s</textarea>' % (
                         self.InstanceName,
                         WidthCSS,
                         HeightCSS,
                         HtmlValue))
        return Html

    def IsCompatible(self):
        """Returns True when the requesting browser can host the editor,
        based on the HTTP_USER_AGENT environment variable."""
        sAgent = os.environ.get("HTTP_USER_AGENT", "")
        if ("MSIE" in sAgent) and ("mac" not in sAgent) and (
                "Opera" not in sAgent):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i + 5:i + 5 + 3])
            return iVersion >= 5.5
        elif "Gecko/" in sAgent:
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i + 6:i + 6 + 8])
            return iVersion >= 20030210
        elif "Opera/" in sAgent:
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i + 6:i + 6 + 4])
            return iVersion >= 9.5
        elif "AppleWebKit/" in sAgent:
            p = re.compile(r'AppleWebKit\/(\d+)', re.IGNORECASE)
            m = p.search(sAgent)
            # Bug fix: compare numerically and tolerate a failed match.
            # The old code compared str >= int, which in Python 2 is
            # always True, accepting every WebKit version.
            return m is not None and int(m.group(1)) >= 522
        else:
            return False

    def GetConfigFieldString(self):
        """Serializes self.Config as an entity-encoded query string for the
        hidden ___Config field."""
        sParams = ""
        bFirst = True
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if not bFirst:
                # Entity-encoded separator: the string is embedded in an
                # HTML attribute (matches upstream fckeditor.py).
                sParams += "&amp;"
            else:
                bFirst = False
            if sValue:
                k = escape(sKey)
                v = escape(sValue)
                if sValue == "true":
                    sParams += "%s=true" % k
                elif sValue == "false":
                    sParams += "%s=false" % k
                else:
                    sParams += "%s=%s" % (k, v)
        return sParams
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
    """Mixin implementing the GetFolders connector command."""

    def getFolders(self, resourceType, currentFolder):
        """Returns an XML <Folders> fragment listing the sub-folders of the
        server-side folder corresponding to currentFolder."""
        local_path = mapServerFolder(self.userFilesFolder, currentFolder)
        entries = []
        for child in os.listdir(local_path):
            child_path = mapServerFolder(local_path, child)
            if not os.path.isdir(child_path):
                continue  # only directories belong in the <Folders> node
            entries.append(
                """<Folder name="%s" />""" % convertToXmlAttribute(child))
        return """<Folders>""" + ''.join(entries) + """</Folders>"""
class GetFoldersAndFilesCommandMixin (object):
    """Mixin implementing the GetFoldersAndFiles connector command."""

    def getFoldersAndFiles(self, resourceType, currentFolder):
        """
        Purpose: command to receive a list of folders and files
        """
        # Map the virtual path to our local server
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        # Open the folders / files node
        folders = """<Folders>"""
        files = """<Files>"""
        for someObject in os.listdir(serverPath):
            someObjectPath = mapServerFolder(serverPath, someObject)
            if os.path.isdir(someObjectPath):
                folders += """<Folder name="%s" />""" % (
                    convertToXmlAttribute(someObject)
                )
            elif os.path.isfile(someObjectPath):
                # Bug fix: stat the file once and reuse the result; the old
                # code assigned 'size' and then called getsize() a second
                # time without using the variable.
                size = os.path.getsize(someObjectPath)
                files += """<File name="%s" size="%s" />""" % (
                    convertToXmlAttribute(someObject),
                    size
                )
        # Close the folders / files node
        folders += """</Folders>"""
        files += """</Files>"""
        return folders + files
class CreateFolderCommandMixin (object):
    """Mixin implementing the CreateFolder connector command."""

    def createFolder(self, resourceType, currentFolder):
        """
        Purpose: command to create a new folder
        """
        errorNo = 0
        errorMsg = ''
        if "NewFolderName" in self.request:
            newFolder = self.request.get("NewFolderName", None)
            newFolder = sanitizeFolderName(newFolder)
            try:
                newFolderPath = mapServerFolder(
                    self.userFilesFolder,
                    combinePaths(currentFolder, newFolder))
                self.createServerFolder(newFolderPath)
            except Exception as e:
                # Map OS-level failures onto connector error codes.
                errorMsg = str(e).decode('iso-8859-1').encode('utf-8')  # warning with encodigns!!!
                if hasattr(e, 'errno'):
                    if e.errno == 17:  # file already exists
                        errorNo = 0
                    elif e.errno == 13:  # permission denied
                        errorNo = 103
                    elif e.errno == 36 or e.errno == 2 or e.errno == 22:
                        # filename too long / no such file / invalid name
                        errorNo = 102
                    else:
                        errorNo = 110
                else:
                    errorNo = 102
        return self.sendErrorNode(errorNo, errorMsg)

    def createServerFolder(self, folderPath):
        "Purpose: physically creates a folder on the server"
        # No need to check if the parent exists, just create all hierachy
        try:
            permissions = Config.ChmodOnFolderCreate
            if not permissions:
                os.makedirs(folderPath)
        except AttributeError:  # ChmodOnFolderCreate undefined
            permissions = 0o755
        if permissions:
            oldumask = os.umask(0)
            # Bug fix: honor the configured permissions; the old code
            # ignored 'permissions' and hard-coded mode=0755.
            os.makedirs(folderPath, mode=permissions)
            os.umask(oldumask)
class UploadFileCommandMixin (object):
	"""Mixin implementing the file-upload command (used by both the
	"FileUpload" connector command and the standalone QuickUpload).

	Relies on the host class for: self.request (parsed request fields),
	self.userFilesFolder / self.webUserFilesFolder (target paths) and
	self.sendUploadResults() (response formatting).
	"""
	def uploadFile(self, resourceType, currentFolder):
		"""
		Purpose: command to upload files to server (same as FileUpload)
		"""
		errorNo = 0
		if self.request.has_key("NewFile"):
			# newFile has all the contents we need
			newFile = self.request.get("NewFile", "")
			# Get the file name
			newFileName = newFile.filename
			newFileName = sanitizeFileName( newFileName )
			newFileNameOnly = removeExtension(newFileName)
			newFileExtension = getExtension(newFileName).lower()
			allowedExtensions = Config.AllowedExtensions[resourceType]
			deniedExtensions = Config.DeniedExtensions[resourceType]
			# A non-empty allowed list takes precedence: the denied list is
			# only consulted when no allowed list exists for this type.
			if (allowedExtensions):
				# Check for allowed
				isAllowed = False
				if (newFileExtension in allowedExtensions):
					isAllowed = True
			elif (deniedExtensions):
				# Check for denied
				isAllowed = True
				if (newFileExtension in deniedExtensions):
					isAllowed = False
			else:
				# No extension limitations
				isAllowed = True
			if (isAllowed):
				# Upload to operating system
				# Map the virtual path to the local server path
				currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
				i = 0
				while (True):
					newFilePath = os.path.join (currentFolderPath,newFileName)
					if os.path.exists(newFilePath):
						# Name collision: append a counter like "name(0001).ext"
						# and retry on the next loop iteration.
						i += 1
						newFileName = "%s(%04d).%s" % (
							newFileNameOnly, i, newFileExtension
							)
						errorNo= 201 # file renamed
					else:
						# Read file contents and write to the desired path (similar to php's move_uploaded_file)
						# NOTE(review): exists()-then-open is racy (TOCTOU);
						# 'file()' is the Python 2 builtin open().
						fout = file(newFilePath, 'wb')
						while (True):
							chunk = newFile.file.read(100000)
							if not chunk: break
							fout.write (chunk)
						fout.close()
						if os.path.exists ( newFilePath ):
							doChmod = False
							try:
								doChmod = Config.ChmodOnUpload
								permissions = Config.ChmodOnUpload
							except AttributeError: #ChmodOnUpload undefined
								doChmod = True
								permissions = 0755
							if ( doChmod ):
								# Clear the umask so the exact configured
								# permissions are applied.
								oldumask = os.umask(0)
								os.chmod( newFilePath, permissions )
								os.umask( oldumask )
						newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
						return self.sendUploadResults( errorNo , newFileUrl, newFileName )
			else:
				return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
		else:
			return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using the Apache web server, rename htaccess.txt to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
# SECURITY: the connector ships disabled; set to True only when access to
# this file is restricted to authenticated users (see warning above).
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After a file is uploaded, sometimes it is required to change its permissions
# so that it is possible to access it at a later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
# (Octal literal — Python 2 syntax.)
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755
# Do not touch these 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# Legacy "cond and X or Y" idiom below: safe here because the path string on
# the true side is always non-empty (hence truthy).
# 'File' resource type: general documents, media and archives.
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']
# 'Image' resource type: bitmap image formats only.
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']
# 'Flash' resource type: Flash movies.
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']
# 'Media' resource type: audio/video (plus a few image formats).
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
	"""Return fileName without its final extension (text after the last dot).

	Raises ValueError (from str.rindex) when fileName contains no dot.
	"""
	lastDot = fileName.rindex(".")
	return fileName[:lastDot]
def getExtension(fileName):
	"""Return the extension of fileName: the text after the last dot
	(dot excluded, case preserved)."""
	return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
	"Strip every leading occurrence of char from string."
	# NOTE: the 'string' parameter shadows the imported string module inside
	# this function; lstrip removes *all* leading copies of char, not one.
	return string.lstrip(char)
def removeFromEnd(string, char):
	"Strip every trailing occurrence of char from string."
	# NOTE: the 'string' parameter shadows the imported string module inside
	# this function; rstrip removes *all* trailing copies of char, not one.
	return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
	"Join basePath and folder with exactly one slash between them."
	return "%s/%s" % ( removeFromEnd( basePath, '/' ), removeFromStart( folder, '/' ) )
def getFileName(filename):
	" Purpose: helper function to extrapolate the filename "
	# Strip any directory prefix, handling both unix and windows separators.
	for sep in ("/", "\\"):
		cut = filename.rfind(sep)
		if cut != -1:
			filename = filename[cut + 1:]
	return filename
def sanitizeFolderName( newFolderName ):
	"Do a cleanup of the folder name to avoid possible problems"
	# Replace . \ / | : ? * " < > and control characters with underscores.
	# BUG FIX: the previous pattern negated the control-character class
	# ([^\u0000-\u001f...]), which would have matched every *ordinary*
	# character instead; per the comment above (and the original PHP
	# connector) the control characters themselves must be replaced.
	return re.sub ( r'(?u)\.|\\|/|\||:|\?|\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFolderName )
def sanitizeFileName( newFileName ):
	"Do a cleanup of the file name to avoid possible problems"
	# BUG FIX: the previous patterns kept PHP-style '/.../' delimiters inside
	# the Python strings, so they only matched when literal slashes
	# surrounded the target text and the intended cleanup never ran.
	# Replace dots in the name with underscores (only one dot can be there... security issue).
	if ( Config.ForceSingleExtension ): # replace every dot except the last one
		newFileName = re.sub ( r'\.(?![^.]*$)', '_', newFileName )
	newFileName = newFileName.replace('\\','/')	# convert windows to unix path
	newFileName = os.path.basename (newFileName)	# strip directories
	# Remove \ / | : ? * " < > and control characters
	return re.sub ( r'(?u)\\|/|\||:|\?|\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFileName )
def getCurrentFolder(currentFolder):
	"""Normalize the "CurrentFolder" request value.

	Guarantees a leading and trailing slash, collapses duplicate slashes,
	and returns None for traversal attempts ('..') or backslashes.
	"""
	if not currentFolder:
		currentFolder = '/'
	# Check the current folder syntax (must begin and end with a slash).
	# (Modernized: '<>' was removed in Python 3; '!=' is equivalent in both.)
	if (currentFolder[-1] != "/"):
		currentFolder += "/"
	if (currentFolder[0] != "/"):
		currentFolder = "/" + currentFolder
	# Ensure the folder path has no double-slashes
	while '//' in currentFolder:
		currentFolder = currentFolder.replace('//','/')
	# Check for invalid folder paths (..)
	if '..' in currentFolder or '\\' in currentFolder:
		return None
	return currentFolder
def mapServerPath( environ, url):
	" Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
	# This isn't correct but for the moment there's no other solution
	# If this script is under a virtual directory or symlink it will detect the problem and stop
	# (the detection lives in getRootPath, which raises when it cannot map).
	return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
	"Join the resource type's root directory with a folder path (single slash)."
	return combinePaths ( resourceTypePath , folderPath )
def getRootPath(environ):
	"Purpose: returns the root path on the server"
	# WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
	# Use Config.UserFilesAbsolutePath instead
	# ('in' replaces has_key, '!=' replaces '<>' — same behavior, py3-safe.)
	if 'DOCUMENT_ROOT' in environ:
		return environ['DOCUMENT_ROOT']
	else:
		realPath = os.path.realpath( './' )
		selfPath = environ['SCRIPT_FILENAME']
		selfPath = selfPath [ : selfPath.rfind( '/' ) ]
		selfPath = selfPath.replace( '/', os.path.sep)
		position = realPath.find(selfPath)
		# This can check only that this script isn't run from a virtual dir
		# But it avoids the problems that arise if it isn't checked
		# BUG FIX: a stray debug statement "raise realPath" sat here, which
		# raised a (non-exception) value unconditionally and made the sanity
		# check below unreachable.
		if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
			raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
		return realPath[ : position ]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
	"WSGI entry point. Run the connector"
	# Dispatch on the script name: the same module serves both the browser
	# connector and the quick-upload handler.
	if environ['SCRIPT_NAME'].endswith("connector.py"):
		conn = FCKeditorConnector(environ)
	elif environ['SCRIPT_NAME'].endswith("upload.py"):
		conn = FCKeditorQuickUpload(environ)
	else:
		start_response ("200 Ok", [('Content-Type','text/html')])
		yield "Unknown page requested: "
		yield environ['SCRIPT_NAME']
		return
	try:
		# run the connector
		data = conn.doResponse()
		# Start WSGI response:
		start_response ("200 Ok", conn.headers)
		# Send response text
		yield data
	except:
		# NOTE(review): bare except — every failure in doResponse() is
		# reported as a 500 page with a cgitb-rendered traceback.
		start_response("500 Internal Server Error",[("Content-type","text/html")])
		# 'file' shadows the builtin here; it is just a StringIO buffer.
		file = StringIO()
		cgitb.Hook(file = file).handle()
		yield file.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
	"""Mixin implementing the "GetFolders" connector command.

	Relies on the host class for self.userFilesFolder.
	"""
	def getFolders(self, resourceType, currentFolder):
		"""
		Purpose: command to receive a list of folders
		"""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
		# One <Folder/> entry per subdirectory, wrapped in a <Folders> node.
		entries = []
		for entryName in os.listdir(serverPath):
			if os.path.isdir(mapServerFolder(serverPath, entryName)):
				entries.append("""<Folder name="%s" />""" % convertToXmlAttribute(entryName))
		return """<Folders>""" + "".join(entries) + """</Folders>"""
class GetFoldersAndFilesCommandMixin (object):
	"""Mixin implementing the "GetFoldersAndFiles" connector command.

	Relies on the host class for self.userFilesFolder.
	"""
	def getFoldersAndFiles(self, resourceType, currentFolder):
		"""
		Purpose: command to receive a list of folders and files
		"""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
		# Open the folders / files node
		folders = """<Folders>"""
		files = """<Files>"""
		for someObject in os.listdir(serverPath):
			someObjectPath = mapServerFolder(serverPath, someObject)
			if os.path.isdir(someObjectPath):
				folders += """<Folder name="%s" />""" % (
						convertToXmlAttribute(someObject)
						)
			elif os.path.isfile(someObjectPath):
				# BUG FIX: the size was computed into a local and then
				# os.path.getsize was called a second time for the XML
				# attribute; stat the file only once.
				size = os.path.getsize(someObjectPath)
				files += """<File name="%s" size="%s" />""" % (
						convertToXmlAttribute(someObject),
						size
						)
		# Close the folders / files node
		folders += """</Folders>"""
		files += """</Files>"""
		return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create all hierachy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath,mode=0755)
os.umask( oldumask )
class UploadFileCommandMixin (object):
	"""Mixin implementing the file-upload command (used by both the
	"FileUpload" connector command and the standalone QuickUpload).

	Relies on the host class for: self.request (parsed request fields),
	self.userFilesFolder / self.webUserFilesFolder (target paths) and
	self.sendUploadResults() (response formatting).
	"""
	def uploadFile(self, resourceType, currentFolder):
		"""
		Purpose: command to upload files to server (same as FileUpload)
		"""
		errorNo = 0
		if self.request.has_key("NewFile"):
			# newFile has all the contents we need
			newFile = self.request.get("NewFile", "")
			# Get the file name
			newFileName = newFile.filename
			newFileName = sanitizeFileName( newFileName )
			newFileNameOnly = removeExtension(newFileName)
			newFileExtension = getExtension(newFileName).lower()
			allowedExtensions = Config.AllowedExtensions[resourceType]
			deniedExtensions = Config.DeniedExtensions[resourceType]
			# A non-empty allowed list takes precedence: the denied list is
			# only consulted when no allowed list exists for this type.
			if (allowedExtensions):
				# Check for allowed
				isAllowed = False
				if (newFileExtension in allowedExtensions):
					isAllowed = True
			elif (deniedExtensions):
				# Check for denied
				isAllowed = True
				if (newFileExtension in deniedExtensions):
					isAllowed = False
			else:
				# No extension limitations
				isAllowed = True
			if (isAllowed):
				# Upload to operating system
				# Map the virtual path to the local server path
				currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
				i = 0
				while (True):
					newFilePath = os.path.join (currentFolderPath,newFileName)
					if os.path.exists(newFilePath):
						# Name collision: append a counter like "name(0001).ext"
						# and retry on the next loop iteration.
						i += 1
						newFileName = "%s(%04d).%s" % (
							newFileNameOnly, i, newFileExtension
							)
						errorNo= 201 # file renamed
					else:
						# Read file contents and write to the desired path (similar to php's move_uploaded_file)
						# NOTE(review): exists()-then-open is racy (TOCTOU);
						# 'file()' is the Python 2 builtin open().
						fout = file(newFilePath, 'wb')
						while (True):
							chunk = newFile.file.read(100000)
							if not chunk: break
							fout.write (chunk)
						fout.close()
						if os.path.exists ( newFilePath ):
							doChmod = False
							try:
								doChmod = Config.ChmodOnUpload
								permissions = Config.ChmodOnUpload
							except AttributeError: #ChmodOnUpload undefined
								doChmod = True
								permissions = 0755
							if ( doChmod ):
								# Clear the umask so the exact configured
								# permissions are applied.
								oldumask = os.umask(0)
								os.chmod( newFilePath, permissions )
								os.umask( oldumask )
						newFileUrl = self.webUserFilesFolder + currentFolder + newFileName
						return self.sendUploadResults( errorNo , newFileUrl, newFileName )
			else:
				return self.sendUploadResults( errorNo = 203, customMsg = "Extension not allowed" )
		else:
			return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload(	FCKeditorConnectorBase,
				UploadFileCommandMixin,
				BaseHttpMixin, BaseHtmlMixin):
	"Connector that only handles the QuickUpload command (the Upload tab)."
	def doResponse(self):
		"Main function. Process the request, set headers and return a string as response."
		# Check if this connector is disabled
		if not(Config.Enabled):
			return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
		command = 'QuickUpload'
		# The file type (from the QueryString, by default 'File').
		resourceType  = self.request.get('Type','File')
		currentFolder = getCurrentFolder(self.request.get("CurrentFolder",""))
		# Check for invalid paths
		if currentFolder is None:
			return self.sendUploadResults(102, '', '', "")
		# Check if it is an allowed command
		if ( not command in Config.ConfigAllowedCommands ):
			return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
		if ( not resourceType in Config.ConfigAllowedTypes ):
			return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
		# Setup paths
		self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
		self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
		if not self.userFilesFolder: # no absolute path given (dangerous...)
			self.userFilesFolder = mapServerPath(self.environ,
								self.webUserFilesFolder)
		# Ensure that the directory exists.
		if not os.path.exists(self.userFilesFolder):
			try:
				# BUG FIX: the method name was doubled
				# ("createServerFoldercreateServerFolder"), raising
				# AttributeError whenever the directory had to be created.
				self.createServerFolder( self.userFilesFolder )
			except:
				return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
		# File upload doesn't have to return XML, so intercept here
		return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
	try:
		# Create a Connector Instance
		conn = FCKeditorQuickUpload()
		data = conn.doResponse()
		# Emit the accumulated HTTP headers; each entry is a (name, value)
		# tuple, and None placeholders are skipped.
		for header in conn.headers:
			if not header is None:
				print '%s: %s' % header
		# Blank line terminates the CGI header block, then the body follows.
		print
		print data
	except:
		# On any failure fall back to a plain-text traceback (CGI style).
		print "Content-Type: text/plain"
		print
		import cgi
		cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
	"The base connector class. Subclass it to extend functionality (see Zope example)"
	def __init__(self, environ=None):
		"Parse the incoming request and start with an empty header list."
		self.request = FCKeditorRequest(environ)	# parsed request fields
		self.headers = []	# accumulated (name, value) response headers
		# Under WSGI the caller hands us its environ mapping; in plain CGI
		# mode fall back to the process environment.
		if environ:
			self.environ = environ
		else:
			self.environ = os.environ
	# local functions
	def setHeader(self, key, value):
		"Queue one (name, value) HTTP header for the response."
		self.headers.append((key, value))
class FCKeditorRequest(object):
	"A wrapper around the request object (cgi.FieldStorage for POST + GET)"
	def __init__(self, environ):
		if environ: # WSGI
			self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
							environ=environ,
							keep_blank_values=1)
			self.environ = environ
		else: # plain old cgi
			self.environ = os.environ
			self.request = cgi.FieldStorage()
		# BUG FIX: always give get_request a value. Previously it was only
		# assigned inside the branch below, so a request without both
		# REQUEST_METHOD and QUERY_STRING in its environ left the attribute
		# unset and get()/has_key() raised AttributeError.
		self.get_request = {}
		if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
			if self.environ['REQUEST_METHOD'].upper()=='POST':
				# we are in a POST, but GET query_string exists
				# cgi parses by default POST data, so parse GET QUERY_STRING too
				self.get_request = cgi.FieldStorage(fp=None,
						environ={
						'REQUEST_METHOD':'GET',
						'QUERY_STRING':self.environ['QUERY_STRING'],
						},
						)
	def has_key(self, key):
		"True when the key is present in either the POST or the GET fields."
		return self.request.has_key(key) or self.get_request.has_key(key)
	def get(self, key, default=None):
		"Return the field value (POST wins over GET); uploads stay as field objects."
		if key in self.request.keys():
			field = self.request[key]
		elif key in self.get_request.keys():
			field = self.get_request[key]
		else:
			return default
		if hasattr(field,"filename") and field.filename: #file upload, do not convert return value
			return field
		else:
			return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector(	FCKeditorConnectorBase,
				GetFoldersCommandMixin,
				GetFoldersAndFilesCommandMixin,
				CreateFolderCommandMixin,
				UploadFileCommandMixin,
				BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
	"The Standard connector class."
	def doResponse(self):
		"Main function. Process the request, set headers and return a string as response."
		s = ""
		# Check if this connector is disabled
		if not(Config.Enabled):
			return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
		# Make sure we have valid inputs
		for key in ("Command","Type","CurrentFolder"):
			if not self.request.has_key (key):
				return
		# Get command, resource type and current folder
		command = self.request.get("Command")
		resourceType = self.request.get("Type")
		currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
		# Check for invalid paths
		if currentFolder is None:
			return self.sendError(102, "")
		# Check if it is an allowed command
		if ( not command in Config.ConfigAllowedCommands ):
			return self.sendError( 1, 'The %s command isn\'t allowed' % command )
		if ( not resourceType in Config.ConfigAllowedTypes ):
			return self.sendError( 1, 'Invalid type specified' )
		# Setup paths
		if command == "QuickUpload":
			self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
			self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
		else:
			self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
			self.webUserFilesFolder = Config.FileTypesPath[resourceType]
		if not self.userFilesFolder: # no absolute path given (dangerous...)
			self.userFilesFolder = mapServerPath(self.environ,
								self.webUserFilesFolder)
		# Ensure that the directory exists.
		if not os.path.exists(self.userFilesFolder):
			try:
				# BUG FIX: the method name was doubled
				# ("createServerFoldercreateServerFolder"), raising
				# AttributeError whenever the directory had to be created.
				self.createServerFolder( self.userFilesFolder )
			except:
				return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
		# File upload doesn't have to return XML, so intercept here
		if (command == "FileUpload"):
			return self.uploadFile(resourceType, currentFolder)
		# Create Url
		url = combinePaths( self.webUserFilesFolder, currentFolder )
		# Begin XML
		s += self.createXmlHeader(command, resourceType, currentFolder, url)
		# Execute the command via a small dispatch table
		selector = {"GetFolders": self.getFolders,
				"GetFoldersAndFiles": self.getFoldersAndFiles,
				"CreateFolder": self.createFolder,
				}
		s += selector[command](resourceType, currentFolder)
		s += self.createXmlFooter()
		return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
	try:
		# Create a Connector Instance
		conn = FCKeditorConnector()
		data = conn.doResponse()
		# Emit the accumulated HTTP headers; each entry is a (name, value) tuple.
		for header in conn.headers:
			print '%s: %s' % header
		# Blank line terminates the CGI header block, then the body follows.
		print
		print data
	except:
		# On any failure fall back to a plain-text traceback (CGI style).
		print "Content-Type: text/plain"
		print
		import cgi
		cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
	"""
	Zope version of FCKeditorConnector.
	(The file header warns this port "was not tested at all"; several
	NOTE(review) comments below flag likely defects to confirm.)
	"""
	# Allow access (Zope)
	__allow_access_to_unprotected_subobjects__ = 1
	def __init__(self, context=None):
		"""
		Constructor
		"""
		FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
		# Instance Attributes
		self.context = context
		self.request = FCKeditorRequest(context)
	def getZopeRootContext(self):
		# Lazily resolve and cache the Zope physical root.
		# NOTE(review): self.zopeRootContext is never initialized in
		# __init__, so the first access raises AttributeError — it should
		# presumably start as None. TODO confirm.
		if self.zopeRootContext is None:
			self.zopeRootContext = self.context.getPhysicalRoot()
		return self.zopeRootContext
	def getZopeUploadContext(self):
		# Walk from the root to the configured upload folder, caching it.
		# NOTE(review): self.zopeUploadContext is likewise never initialized.
		if self.zopeUploadContext is None:
			folderNames = self.userFilesFolder.split("/")
			c = self.getZopeRootContext()
			for folderName in folderNames:
				if (folderName <> ""):
					c = c[folderName]
			self.zopeUploadContext = c
		return self.zopeUploadContext
	def setHeader(self, key, value):
		# Headers go straight to the Zope RESPONSE object instead of the
		# base class's header list.
		self.context.REQUEST.RESPONSE.setHeader(key, value)
	def getFolders(self, resourceType, currentFolder):
		# Build the <Folders> XML node from Zope "Folder" objects.
		# Open the folders node
		s = ""
		s += """<Folders>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["Folder"]):
			s += """<Folder name="%s" />""" % (
					convertToXmlAttribute(name)
					)
		# Close the folders node
		s += """</Folders>"""
		return s
	def getZopeFoldersAndFiles(self, resourceType, currentFolder):
		# NOTE(review): calls self.getZopeFolders, but this class only
		# defines getFolders — one of the two names looks wrong.
		folders = self.getZopeFolders(resourceType, currentFolder)
		files = self.getZopeFiles(resourceType, currentFolder)
		s = folders + files
		return s
	def getZopeFiles(self, resourceType, currentFolder):
		# Build the <Files> XML node from Zope "File"/"Image" objects;
		# sizes are reported in KB, rounded up by the +1.
		# Open the files node
		s = ""
		s += """<Files>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		for (name, o) in zopeFolder.objectItems(["File","Image"]):
			s += """<File name="%s" size="%s" />""" % (
					convertToXmlAttribute(name),
					((o.get_size() / 1024) + 1)
					)
		# Close the files node
		s += """</Files>"""
		return s
	def findZopeFolder(self, resourceType, folderName):
		# returns the context of the resource / folder
		# NOTE(review): removeFromStart/removeFromEnd are module-level
		# functions in fckutil, not methods, so these self. calls should
		# raise AttributeError. TODO confirm.
		zopeFolder = self.getZopeUploadContext()
		folderName = self.removeFromStart(folderName, "/")
		folderName = self.removeFromEnd(folderName, "/")
		if (resourceType <> ""):
			try:
				zopeFolder = zopeFolder[resourceType]
			except:
				# Folder for this resource type doesn't exist yet: create it.
				zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
				zopeFolder = zopeFolder[resourceType]
		if (folderName <> ""):
			folderNames = folderName.split("/")
			for folderName in folderNames:
				zopeFolder = zopeFolder[folderName]
		return zopeFolder
	def createFolder(self, resourceType, currentFolder):
		# Find out where we are
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		errorNo = 0
		errorMsg = ""
		if self.request.has_key("NewFolderName"):
			newFolder = self.request.get("NewFolderName", None)
			zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
		else:
			errorNo = 102
		return self.sendErrorNode ( errorNo, errorMsg )
	def uploadFile(self, resourceType, currentFolder, count=None):
		# Store the uploaded file as a Zope File object; on an id clash,
		# retry with "name.count.ext".
		# NOTE(review): getFileName/removeExtension/getExtension are module
		# functions (fckutil), not methods; and self.zopeFileUpload is not
		# defined on this class — presumably self.uploadFile was intended.
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		file = self.request.get("NewFile", None)
		fileName = self.getFileName(file.filename)
		fileNameOnly = self.removeExtension(fileName)
		fileExtension = self.getExtension(fileName).lower()
		if (count):
			nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
		else:
			nid = fileName
		title = nid
		try:
			zopeFolder.manage_addProduct['OFSP'].manage_addFile(
				id=nid,
				title=title,
				file=file.read()
				)
		except:
			if (count):
				count += 1
			else:
				count = 1
			return self.zopeFileUpload(resourceType, currentFolder, count)
		return self.sendUploadResults( 0 )
class FCKeditorRequest(object):
    "A wrapper around the request object"

    def __init__(self, context=None):
        # Expose the Zope REQUEST object through a small dict-like facade.
        self.request = context.REQUEST

    def has_key(self, key):
        "Dictionary-style membership test, delegated to the Zope request."
        return self.request.has_key(key)

    def get(self, key, default=None):
        "Dictionary-style lookup with a default, delegated to the Zope request."
        return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a like to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=None):
    """
    Convert the special characters '&', '<', '>' and '"' in *text* to their
    HTML entities ('&amp;', '&lt;', '&gt;', '&quot;'), per RFC 1866.

    BUGFIX: the replacement strings had been reduced to the bare characters
    (e.g. replacing '&' with '&'), making the function a no-op and leaving
    XML attribute values unescaped; the entities are restored here. The
    default for *replace* no longer relies on ``string.replace``, which was
    removed from the ``string`` module in Python 3.
    """
    if replace is None:
        # Default implementation: plain str.replace with the original call shape.
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;')  # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    return text
def convertToXmlAttribute(value):
    "Escape *value* for use inside an XML attribute; None becomes ''."
    if value is None:
        return escape("")
    return escape(value)
class BaseHttpMixin(object):
    "Mixin that prepares the HTTP response headers for a connector answer."

    def setHttpHeaders(self, content_type='text/xml'):
        "Emit anti-caching headers followed by the response Content-Type."
        anti_cache = (
            ('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT'),                         # date in the past
            ('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())),   # always modified
            ('Cache-Control', 'no-store, no-cache, must-revalidate'),             # HTTP/1.1
            ('Cache-Control', 'post-check=0, pre-check=0'),
            ('Pragma', 'no-cache'),                                               # HTTP/1.0
        )
        for name, value in anti_cache:
            self.setHeader(name, value)
        # Set the response format.
        self.setHeader('Content-Type', content_type + '; charset=utf-8')
        return
class BaseXmlMixin(object):
    "Mixin that builds the XML pieces of a connector response."

    def createXmlHeader(self, command, resourceType, currentFolder, url):
        "Purpose: returns the xml header"
        self.setHttpHeaders()
        parts = [
            # XML document header
            """<?xml version="1.0" encoding="utf-8" ?>""",
            # Main connector node
            """<Connector command="%s" resourceType="%s">""" % (command, resourceType),
            # Current folder node
            """<CurrentFolder path="%s" url="%s" />""" % (
                convertToXmlAttribute(currentFolder),
                convertToXmlAttribute(url),
            ),
        ]
        return "".join(parts)

    def createXmlFooter(self):
        "Purpose: returns the xml footer"
        return """</Connector>"""

    def sendError(self, number, text):
        "Purpose: in the event of an error, return an xml based error"
        self.setHttpHeaders()
        body = self.sendErrorNode(number, text)
        return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
                """<Connector>""" + body + """</Connector>""")

    def sendErrorNode(self, number, text):
        "Build a single <Error> node with an escaped message."
        return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
    "Mixin that renders the HTML/JS answer for upload requests."

    def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
        "This is the function that sends the results of the uploading process"
        self.setHttpHeaders("text/html")
        # The script below is the minified version of the document.domain
        # automatic fix script (#1919); the original lives at
        # _dev/domain_fix_template.js.
        quote = lambda s: s.replace('"', '\\"')
        return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
            'errorNumber': errorNo,
            'fileUrl': quote(fileUrl),
            'fileName': quote(fileName),
            'customMsg': quote(customMsg),
        }
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2008 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using the Apache web server, rename the htaccess.txt file to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False  # SECURITY: keep False until access to this script is restricted
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector.
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types.
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After a file is uploaded, it is sometimes required to change its permissions
# so that it is possible to access it at a later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
# Note: 0755 is a Python 2 octal literal (0o755 in Python 3 syntax).
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755
# Do not touch these 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# --- 'File' resource type -------------------------------------------------
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
# The and/or chain below is the pre-Python-2.5 conditional expression:
# absolute path when UserFilesAbsolutePath is set, '' (autocalculate) otherwise.
FileTypesAbsolutePath['File'] = (not UserFilesAbsolutePath == '') and (UserFilesAbsolutePath + 'file/') or ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']
# --- 'Image' resource type ------------------------------------------------
AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
FileTypesAbsolutePath['Image'] = (not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'image/' or ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image']= FileTypesAbsolutePath['Image']
# --- 'Flash' resource type ------------------------------------------------
AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
FileTypesAbsolutePath['Flash'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'flash/' or ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash']= FileTypesAbsolutePath['Flash']
# --- 'Media' resource type ------------------------------------------------
AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
FileTypesAbsolutePath['Media'] = ( not UserFilesAbsolutePath == '') and UserFilesAbsolutePath + 'media/' or ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media']= FileTypesAbsolutePath['Media']
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
    "The base connector class. Subclass it to extend functionality (see Zope example)"

    def __init__(self, environ=None):
        "Constructor: Here you should parse request fields, initialize variables, etc."
        self.request = FCKeditorRequest(environ)  # parsed GET/POST fields
        self.headers = []                         # response headers queued via setHeader()
        if environ:
            self.environ = environ                # WSGI environment
        else:
            self.environ = os.environ             # plain old CGI

    # local functions
    def setHeader(self, key, value):
        "Queue a (key, value) response header pair."
        self.headers.append((key, value))
        return
class FCKeditorRequest(object):
    "A wrapper around the request object"

    def __init__(self, environ):
        """Parse POST (and, for POSTs, GET query-string) fields.

        *environ* is a WSGI environment, or None for plain old CGI.
        """
        # BUGFIX: guarantee the attribute exists. The original only assigned
        # self.get_request inside the REQUEST_METHOD/QUERY_STRING branch, so
        # has_key()/get() crashed with AttributeError whenever those keys were
        # missing from the environment.
        self.get_request = {}
        if environ:  # WSGI
            self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
                                            environ=environ,
                                            keep_blank_values=1)
            self.environ = environ
        else:  # plain old cgi
            self.environ = os.environ
            self.request = cgi.FieldStorage()
        if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
            if self.environ['REQUEST_METHOD'].upper() == 'POST':
                # We are in a POST, but a GET query string exists too.
                # cgi parses only the POST data by default, so parse the
                # GET QUERY_STRING separately.
                self.get_request = cgi.FieldStorage(fp=None,
                        environ={
                            'REQUEST_METHOD': 'GET',
                            'QUERY_STRING': self.environ['QUERY_STRING'],
                        },
                        )

    def has_key(self, key):
        "True when *key* is present in the POST or the GET fields."
        return (key in self.request) or (key in self.get_request)

    def get(self, key, default=None):
        """Return the value for *key*, checking POST fields before GET ones.

        File-upload fields are returned unconverted (so the caller can stream
        them); other fields are returned as their string value.
        """
        if key in self.request:
            field = self.request[key]
        elif key in self.get_request:
            field = self.get_request[key]
        else:
            return default
        if hasattr(field, "filename") and field.filename:
            # file upload, do not convert return value
            return field
        return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector( FCKeditorConnectorBase,
        GetFoldersCommandMixin,
        GetFoldersAndFilesCommandMixin,
        CreateFolderCommandMixin,
        UploadFileCommandMixin,
        BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
    "The Standard connector class."

    def doResponse(self):
        """Main function. Process the request, set headers and return a string
        as response (XML, or raw HTML for FileUpload commands)."""
        s = ""
        # Check if this connector is disabled
        if not Config.Enabled:
            return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
        # Make sure we have valid inputs
        for key in ("Command", "Type", "CurrentFolder"):
            if not self.request.has_key(key):
                return
        # Get command, resource type and current folder
        command = self.request.get("Command")
        resourceType = self.request.get("Type")
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
        # Check for invalid paths
        if currentFolder is None:
            return self.sendError(102, "")
        # Check if it is an allowed command
        if command not in Config.ConfigAllowedCommands:
            return self.sendError(1, 'The %s command isn\'t allowed' % command)
        if resourceType not in Config.ConfigAllowedTypes:
            return self.sendError(1, 'Invalid type specified')
        # Setup paths
        if command == "QuickUpload":
            self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        else:
            self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.FileTypesPath[resourceType]
        if not self.userFilesFolder:  # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # BUGFIX: the original read
                # self.createServerFoldercreateServerFolder (the method name
                # accidentally doubled), which always raised AttributeError and
                # returned the error below even when creation would succeed.
                self.createServerFolder(self.userFilesFolder)
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        if command == "FileUpload":
            return self.uploadFile(resourceType, currentFolder)
        # Create Url
        url = combinePaths(self.webUserFilesFolder, currentFolder)
        # Begin XML
        s += self.createXmlHeader(command, resourceType, currentFolder, url)
        # Execute the command via a dispatch table
        selector = {"GetFolders": self.getFolders,
                    "GetFoldersAndFiles": self.getFoldersAndFiles,
                    "CreateFolder": self.createFolder,
                    }
        s += selector[command](resourceType, currentFolder)
        s += self.createXmlFooter()
        return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorConnector()
        data = conn.doResponse()
        # Emit the queued response headers, a blank separator line, then the
        # body (Python 2 print statements: this file is Python 2 code).
        for header in conn.headers:
            print '%s: %s' % header
        print
        print data
    except:
        # Last-resort error page so the CGI still returns something readable.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
    "Return *fileName* without its last extension (raises ValueError when no dot)."
    return fileName[:fileName.rindex(".")]
def getExtension(fileName):
    "Return the text after the last dot of *fileName* (raises ValueError when no dot)."
    return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
    "Strip every leading occurrence of the character(s) in *char* from *string*."
    stripped = string.lstrip(char)
    return stripped
def removeFromEnd(string, char):
    "Strip every trailing occurrence of the character(s) in *char* from *string*."
    stripped = string.rstrip(char)
    return stripped
# Path functions
def combinePaths( basePath, folder ):
    "Join *basePath* and *folder* with exactly one slash between them."
    left = removeFromEnd( basePath, '/' )
    right = removeFromStart( folder, '/' )
    return left + '/' + right
def getFileName(filename):
    " Purpose: helper function to extrapolate the filename "
    # Keep only the part after the last '/' and then the last '\\'.
    for splitChar in ["/", "\\"]:
        if splitChar in filename:
            filename = filename.rsplit(splitChar, 1)[-1]
    return filename
def sanitizeFolderName( newFolderName ):
    "Do a cleanup of the folder name to avoid possible problems"
    # Remove . \ / | : ? * " < > and control characters
    # NOTE(review): the final character class is NEGATED ([^...]) and \uXXXX
    # escapes are inert inside a Python 2 byte string, so this pattern very
    # likely substitutes ordinary characters instead of control characters,
    # contrary to the comment above. Looks garbled in the port -- confirm
    # against the upstream FCKeditor fckutil.py before relying on it.
    return re.sub( '(?u)\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]', '_', newFolderName )
def sanitizeFileName( newFileName ):
    "Do a cleanup of the file name to avoid possible problems"
    # NOTE(review): both re.sub patterns in this function are wrapped in
    # literal '/' delimiters (PHP style); Python treats those slashes as
    # characters to match, so the substitutions almost certainly never fire.
    # Compare with the upstream FCKeditor fckutil.py before relying on this.
    # Replace dots in the name with underscores (only one dot can be there... security issue).
    if ( Config.ForceSingleExtension ): # remove dots
        newFileName = re.sub ( '/\\.(?![^.]*$)/', '_', newFileName ) ;
    newFileName = newFileName.replace('\\','/') # convert windows to unix path
    newFileName = os.path.basename (newFileName) # strip directories
    # Remove \ / | : ? *
    return re.sub ( '(?u)/\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[^\u0000-\u001f\u007f-\u009f]/', '_', newFileName )
def getCurrentFolder(currentFolder):
    "Normalize *currentFolder* to '/.../' form; return None for invalid paths."
    folder = currentFolder
    if not folder:
        folder = '/'
    # The folder path must end with a slash...
    if not folder.endswith("/"):
        folder = folder + "/"
    # ...and begin with one.
    if not folder.startswith("/"):
        folder = "/" + folder
    # Collapse any double slashes.
    while '//' in folder:
        folder = folder.replace('//', '/')
    # Reject traversal attempts (..) and backslashes.
    if '..' in folder or '\\' in folder:
        return None
    return folder
def mapServerPath( environ, url):
    " Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
    # This isn't correct but for the moment there's no other solution
    # If this script is under a virtual directory or symlink it will detect the problem and stop
    root = getRootPath(environ)
    return combinePaths( root, url )
def mapServerFolder(resourceTypePath, folderPath):
    "Join a resource-type root path with a folder path."
    return combinePaths(resourceTypePath, folderPath)
def getRootPath(environ):
    """Purpose: returns the root path on the server.

    Uses DOCUMENT_ROOT when present; otherwise derives the root from the
    current working directory and SCRIPT_FILENAME, raising when the script
    appears to run from a virtual directory.
    """
    # WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
    # Use Config.UserFilesAbsolutePath instead
    if 'DOCUMENT_ROOT' in environ:
        return environ['DOCUMENT_ROOT']
    else:
        realPath = os.path.realpath('./')
        selfPath = environ['SCRIPT_FILENAME']
        selfPath = selfPath[:selfPath.rfind('/')]
        selfPath = selfPath.replace('/', os.path.sep)
        position = realPath.find(selfPath)
        # This can check only that this script isn't run from a virtual dir
        # But it avoids the problems that arise if it isn't checked.
        # BUGFIX: the original had a stray debug statement `raise realPath`
        # right here, which made this whole fallback branch always raise
        # before the validity check below could run.
        if (position < 0 or position != len(realPath) - len(selfPath) or realPath[:position] == ''):
            raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
        return realPath[:position]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=None):
    """
    Convert the special characters '&', '<', '>' and '"' in *text* to their
    HTML entities ('&amp;', '&lt;', '&gt;', '&quot;'), per RFC 1866.

    BUGFIX: the replacement strings had been reduced to the bare characters
    (e.g. replacing '&' with '&'), making the function a no-op; the entities
    are restored here. The *replace* default also no longer relies on
    ``string.replace``, which modern Python removed from the module.
    """
    if replace is None:
        # Default implementation: plain str.replace with the original call shape.
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;')  # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    return text
def convertToXmlAttribute(value):
    "Return *value* (or '' when None) escaped for use in an XML attribute."
    safe = value
    if safe is None:
        safe = ""
    return escape(safe)
class BaseHttpMixin(object):
    "Mixin that prepares the HTTP response headers for a connector answer."

    def setHttpHeaders(self, content_type='text/xml'):
        "Emit anti-caching headers, then the response Content-Type."
        emit = self.setHeader
        # Prevent the browser from caching the result:
        emit('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT')  # date in the past
        emit('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))  # always modified
        emit('Cache-Control', 'no-store, no-cache, must-revalidate')  # HTTP/1.1
        emit('Cache-Control', 'post-check=0, pre-check=0')
        emit('Pragma', 'no-cache')  # HTTP/1.0
        # Set the response format.
        emit('Content-Type', content_type + '; charset=utf-8')
        return
class BaseXmlMixin(object):
    "Mixin that assembles the XML portions of a connector response."

    def createXmlHeader(self, command, resourceType, currentFolder, url):
        "Purpose: returns the xml header"
        self.setHttpHeaders()
        # XML document header plus the main connector node.
        header = """<?xml version="1.0" encoding="utf-8" ?>"""
        header = header + """<Connector command="%s" resourceType="%s">""" % (command, resourceType)
        # Add the current folder node.
        header = header + """<CurrentFolder path="%s" url="%s" />""" % (
            convertToXmlAttribute(currentFolder),
            convertToXmlAttribute(url),
        )
        return header

    def createXmlFooter(self):
        "Purpose: returns the xml footer"
        return """</Connector>"""

    def sendError(self, number, text):
        "Purpose: in the event of an error, return an xml based error"
        self.setHttpHeaders()
        node = self.sendErrorNode(number, text)
        return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
                """<Connector>""" + node + """</Connector>""")

    def sendErrorNode(self, number, text):
        "Build a single <Error> node with an escaped message."
        return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
    "Mixin that renders the HTML/JS answer for upload requests."

    def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
        "This is the function that sends the results of the uploading process"
        self.setHttpHeaders("text/html")
        # Escape embedded double quotes for the JS string literals below.
        url_js = fileUrl.replace('"', '\\"')
        name_js = fileName.replace('"', '\\"')
        msg_js = customMsg.replace('"', '\\"')
        # Minified document.domain automatic fix script (#1919); original at
        # _dev/domain_fix_template.js.
        return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
            'errorNumber': errorNo,
            'fileUrl': url_js,
            'fileName': name_js,
            'customMsg': msg_js,
        }
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from a WSGI-capable server (recommended)
def App(environ, start_response):
    """WSGI entry point. Run the connector.

    Dispatches on SCRIPT_NAME to either the full connector or the quick
    uploader, and yields the response body (generator-based WSGI app).
    """
    if environ['SCRIPT_NAME'].endswith("connector.py"):
        conn = FCKeditorConnector(environ)
    elif environ['SCRIPT_NAME'].endswith("upload.py"):
        conn = FCKeditorQuickUpload(environ)
    else:
        # Unknown endpoint: answer 200 with a short diagnostic page.
        start_response ("200 Ok", [('Content-Type','text/html')])
        yield "Unknown page requested: "
        yield environ['SCRIPT_NAME']
        return
    try:
        # run the connector
        data = conn.doResponse()
        # Start WSGI response:
        start_response ("200 Ok", conn.headers)
        # Send response text
        yield data
    except:
        # NOTE(review): this bare except also swallows SystemExit and
        # KeyboardInterrupt, and calling start_response here fails if the
        # response has already started -- acceptable for a debug page, but
        # worth confirming before production use.
        start_response("500 Internal Server Error",[("Content-type","text/html")])
        file = StringIO()
        cgitb.Hook(file = file).handle()
        yield file.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
        UploadFileCommandMixin,
        BaseHttpMixin, BaseHtmlMixin):
    "Connector handling only the quick-upload (Upload tab) requests."

    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        # Check if this connector is disabled
        if not Config.Enabled:
            return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
        command = 'QuickUpload'
        # The file type (from the QueryString, by default 'File').
        resourceType = self.request.get('Type', 'File')
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder", ""))
        # Check for invalid paths
        if currentFolder is None:
            return self.sendUploadResults(102, '', '', "")
        # Check if it is an allowed command
        if command not in Config.ConfigAllowedCommands:
            return self.sendUploadResults(1, '', '', 'The %s command isn\'t allowed' % command)
        if resourceType not in Config.ConfigAllowedTypes:
            return self.sendUploadResults(1, '', '', 'Invalid type specified')
        # Setup paths
        self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
        self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        if not self.userFilesFolder:  # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                                 self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                # BUGFIX: the original read
                # self.createServerFoldercreateServerFolder (method name
                # accidentally doubled), which always raised AttributeError.
                self.createServerFolder(self.userFilesFolder)
            except:
                # BUGFIX: the original called self.sendError(), which this
                # class does not have (BaseXmlMixin is not mixed in); report
                # through the HTML upload-results path like every other error.
                return self.sendUploadResults(1, '', '', "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance
        conn = FCKeditorQuickUpload()
        data = conn.doResponse()
        # Emit the queued response headers, a blank separator line, then the
        # body (Python 2 print statements: this file is Python 2 code).
        for header in conn.headers:
            if not header is None:
                print '%s: %s' % header
        print
        print data
    except:
        # Last-resort error page so the CGI still returns something readable.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2008 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
    """Zope version of FCKeditorConnector.

    Performs the connector's folder/file operations against the Zope object
    database, navigating from the acquired ``context`` instead of touching
    the filesystem.
    """

    # Allow access from unprotected (through-the-web) Zope code.
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, context=None):
        """Store the Zope context and wrap its REQUEST object."""
        FCKeditorConnector.__init__(self, environ=None)  # call superclass constructor
        # Instance Attributes
        self.context = context
        self.request = FCKeditorRequest(context)

    def getZopeRootContext(self):
        # Lazily resolve and cache the Zope application root.
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext

    def getZopeUploadContext(self):
        # Lazily walk from the Zope root down to the configured upload
        # folder, caching the resulting container for later calls.
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if folderName != "":
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext

    def setHeader(self, key, value):
        # Response headers go through the Zope RESPONSE object.
        self.context.REQUEST.RESPONSE.setHeader(key, value)

    def getFolders(self, resourceType, currentFolder):
        """Return the <Folders> XML fragment listing subfolders."""
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        # Close the folders node
        s += """</Folders>"""
        return s

    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        """Return the concatenated <Folders> and <Files> XML fragments."""
        # BUG FIX: this used to call self.getZopeFolders(), which is not
        # defined on this class; the folder-listing method is getFolders().
        folders = self.getFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s

    def getZopeFiles(self, resourceType, currentFolder):
        """Return the <Files> XML fragment listing files and images."""
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File", "Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)  # size reported in KB, rounded up
            )
        # Close the files node
        s += """</Files>"""
        return s

    def findZopeFolder(self, resourceType, folderName):
        """Return the Zope container for resourceType/folderName.

        Creates the resource-type folder on demand; intermediate folders in
        folderName must already exist.
        """
        zopeFolder = self.getZopeUploadContext()
        folderName = self.removeFromStart(folderName, "/")
        folderName = self.removeFromEnd(folderName, "/")
        if resourceType != "":
            try:
                zopeFolder = zopeFolder[resourceType]
            except:  # folder for this resource type does not exist yet
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if folderName != "":
            folderNames = folderName.split("/")
            for folderName in folderNames:
                zopeFolder = zopeFolder[folderName]
        return zopeFolder

    def createFolder(self, resourceType, currentFolder):
        """Create the subfolder named by the NewFolderName request field."""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            errorNo = 102  # no folder name supplied
        return self.sendErrorNode(errorNo, errorMsg)

    def uploadFile(self, resourceType, currentFolder, count=None):
        """Store the uploaded NewFile in the target Zope folder.

        On an id collision the add raises; retry with "name.count.ext",
        incrementing count until the add succeeds.
        """
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        fileName = self.getFileName(file.filename)
        fileNameOnly = self.removeExtension(fileName)
        fileExtension = self.getExtension(fileName).lower()
        if count:
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=file.read()
            )
        except:  # id already taken -- retry with a numbered name
            if count:
                count += 1
            else:
                count = 1
            # BUG FIX: the retry used to call the non-existent
            # self.zopeFileUpload(); recurse into uploadFile() instead.
            return self.uploadFile(resourceType, currentFolder, count)
        return self.sendUploadResults(0)
class FCKeditorRequest(object):
    """Thin adapter exposing a Zope REQUEST through a minimal mapping API."""

    def __init__(self, context=None):
        # Keep only the REQUEST object; nothing else from the context is
        # needed by the connector.
        self.request = context.REQUEST

    def has_key(self, key):
        # Delegate membership testing to the underlying request.
        return self.request.has_key(key)

    def get(self, key, default=None):
        # Delegate lookup (with fallback value) to the underlying request.
        return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
# -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.1.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
# Demo project page that plain GET requests are redirected to.
WEBSITE = 'http://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1  # bytes
MAX_FILE_SIZE = 5000000  # bytes
# Content types treated as images (eligible for serving/thumbnail URLs).
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
# Only image uploads pass validate(); everything else is rejected.
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80'  # max width / height
# Uploaded blobs are deleted by the deferred cleanup() after this many seconds.
EXPIRATION_TIME = 300  # seconds
def cleanup(blob_keys):
    # Deferred task: delete the demo upload blobs once EXPIRATION_TIME
    # has elapsed (scheduled in UploadHandler.handle_upload).
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    """Handles jQuery File Upload POSTs.

    Validates each uploaded file, writes it to the blobstore, schedules a
    deferred cleanup, and answers with a JSON description of each file.
    """

    def initialize(self, request, response):
        """Attach permissive CORS headers to every response."""
        super(UploadHandler, self).initialize(request, response)
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
        self.response.headers[
            'Access-Control-Allow-Headers'
        ] = 'Content-Type, Content-Range, Content-Disposition'

    def validate(self, file):
        """Check size and type limits; set file['error'] and return False on failure."""
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False

    def get_file_size(self, file):
        """Return the byte size of an open file object without consuming it."""
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size

    def write_blob(self, data, info):
        """Write data into a new blobstore file and return its blob key."""
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)

    def handle_upload(self):
        """Store every uploaded file and return a list of per-file result dicts."""
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            # Plain (non-file) form fields arrive as unicode strings; skip.
            if isinstance(fieldStorage, unicode):
                continue
            result = {}
            # Strip any Windows-style directory prefix from the file name.
            result['name'] = re.sub(
                r'^.*\\',
                '',
                fieldStorage.filename
            )
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['deleteType'] = 'DELETE'
                result['deleteUrl'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if IMAGE_TYPES.match(result['type']):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url.startswith(
                                'https'
                            )
                        )
                        result['thumbnailUrl'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    # Narrowed from a bare except: could not get an image
                    # serving url; fall through to the download URL below.
                    except Exception:
                        pass
                if 'url' not in result:
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        # This is a demo: delete the uploads after EXPIRATION_TIME seconds.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results

    def options(self):
        # CORS preflight: the headers set in initialize() are the response.
        pass

    def head(self):
        pass

    def get(self):
        # The backend has no UI of its own; send browsers to the demo page.
        self.redirect(WEBSITE)

    def post(self):
        """Handle an upload POST (or a tunnelled DELETE) and reply with JSON."""
        if self.request.get('_method') == 'DELETE':
            return self.delete()
        result = {'files': self.handle_upload()}
        s = json.dumps(result, separators=(',', ':'))
        redirect = self.request.get('redirect')
        if redirect:
            # Legacy iframe transport: embed the JSON in a redirect URL.
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        # BUG FIX: headers.get('Accept') may return None, which would make
        # the 'in' test raise TypeError; default to '' instead.
        if 'application/json' in (self.request.headers.get('Accept') or ''):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)

    def delete(self):
        """Delete the blob named by the 'key' query parameter."""
        blobstore.delete(self.request.get('key') or '')
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serves stored blobs back to the client as forced downloads."""

    def get(self, key, filename):
        # Guard clause: unknown blob key -> 404.
        if not blobstore.get(key):
            self.error(404)
            return
        # Prevent browsers from MIME-sniffing the content-type:
        self.response.headers['X-Content-Type-Options'] = 'nosniff'
        # Cache for the expiration time:
        self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
        # Send the file forcing a download dialog:
        self.send_blob(key, save_as=filename, content_type='application/octet-stream')
# WSGI routing: "/" handles uploads (and tunnelled DELETEs);
# "/<blob_key>/<filename>" serves stored files back as downloads.
app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/([^/]+)/([^/]+)', DownloadHandler)
    ],
    debug=True
)
| Python |
<!DOCTYPE html>
<html>
<head>
<link rel="icon" type="image/vnd.microsoft.icon" href="http://www.gstatic.com/codesite/ph/images/phosting.ico">
<script type="text/javascript">
var codesite_token = "O3xiHWt3_Jm_qZ8IPZfxTV1QHMQ:1324516583409";
var CS_env = {"profileUrl":["/u/114866778344639499534/"],"token":"O3xiHWt3_Jm_qZ8IPZfxTV1QHMQ:1324516583409","assetHostPath":"http://www.gstatic.com/codesite/ph","domainName":null,"assetVersionPath":"http://www.gstatic.com/codesite/ph/1847340689237817661","projectHomeUrl":"/p/5a-bucarest-vallee-cedric","relativeBaseUrl":"","projectName":"5a-bucarest-vallee-cedric","loggedInUserEmail":"V4LLEE.CEDRIC@gmail.com"};
var _gaq = _gaq || [];
_gaq.push(
['siteTracker._setAccount', 'UA-18071-1'],
['siteTracker._trackPageview']);
</script>
<title>ids.py -
5a-bucarest-vallee-cedric -
TP-Projet Erasmus Bucarest - Google Project Hosting
</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" >
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" >
<meta name="ROBOTS" content="NOARCHIVE">
<link type="text/css" rel="stylesheet" href="http://www.gstatic.com/codesite/ph/1847340689237817661/css/core.css">
<link type="text/css" rel="stylesheet" href="http://www.gstatic.com/codesite/ph/1847340689237817661/css/ph_detail.css" >
<link type="text/css" rel="stylesheet" href="http://www.gstatic.com/codesite/ph/1847340689237817661/css/d_sb.css" >
<!--[if IE]>
<link type="text/css" rel="stylesheet" href="http://www.gstatic.com/codesite/ph/1847340689237817661/css/d_ie.css" >
<![endif]-->
<style type="text/css">
.menuIcon.off { background: no-repeat url(http://www.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -42px }
.menuIcon.on { background: no-repeat url(http://www.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 -28px }
.menuIcon.down { background: no-repeat url(http://www.gstatic.com/codesite/ph/images/dropdown_sprite.gif) 0 0; }
tr.inline_comment {
background: #fff;
vertical-align: top;
}
div.draft, div.published {
padding: .3em;
border: 1px solid #999;
margin-bottom: .1em;
font-family: arial, sans-serif;
max-width: 60em;
}
div.draft {
background: #ffa;
}
div.published {
background: #e5ecf9;
}
div.published .body, div.draft .body {
padding: .5em .1em .1em .1em;
max-width: 60em;
white-space: pre-wrap;
white-space: -moz-pre-wrap;
white-space: -pre-wrap;
white-space: -o-pre-wrap;
word-wrap: break-word;
font-size: 1em;
}
div.draft .actions {
margin-left: 1em;
font-size: 90%;
}
div.draft form {
padding: .5em .5em .5em 0;
}
div.draft textarea, div.published textarea {
width: 95%;
height: 10em;
font-family: arial, sans-serif;
margin-bottom: .5em;
}
.nocursor, .nocursor td, .cursor_hidden, .cursor_hidden td {
background-color: white;
height: 2px;
}
.cursor, .cursor td {
background-color: darkblue;
height: 2px;
display: '';
}
.list {
border: 1px solid white;
border-bottom: 0;
}
</style>
</head>
<body class="t4">
<script type="text/javascript">
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(ga);
})();
</script>
<div class="headbg">
<div id="gaia">
<span>
<b>V4LLEE.CEDRIC@gmail.com</b>
| <a href="/u/114866778344639499534/" id="projects-dropdown" onclick="return false;"
><u>My favorites</u> <small>▼</small></a>
| <a href="/u/114866778344639499534/" onclick="_CS_click('/gb/ph/profile');"
title="Profile, Updates, and Settings"
><u>Profile</u></a>
| <a href="https://www.google.com/accounts/Logout?continue=http%3A%2F%2Fcode.google.com%2Fp%2F5a-bucarest-vallee-cedric%2Fsource%2Fbrowse%2Ftrunk%2Fnss%2Fhomework%2Fexo2%2Fids.py"
onclick="_CS_click('/gb/ph/signout');"
><u>Sign out</u></a>
</span>
</div>
<div class="gbh" style="left: 0pt;"></div>
<div class="gbh" style="right: 0pt;"></div>
<div style="height: 1px"></div>
<!--[if lte IE 7]>
<div style="text-align:center;">
Your version of Internet Explorer is not supported. Try a browser that
contributes to open source, such as <a href="http://www.firefox.com">Firefox</a>,
<a href="http://www.google.com/chrome">Google Chrome</a>, or
<a href="http://code.google.com/chrome/chromeframe/">Google Chrome Frame</a>.
</div>
<![endif]-->
<table style="padding:0px; margin: 0px 0px 10px 0px; width:100%" cellpadding="0" cellspacing="0"
itemscope itemtype="http://schema.org/CreativeWork">
<tr style="height: 58px;">
<td id="plogo">
<link itemprop="url" href="/p/5a-bucarest-vallee-cedric">
<a href="/p/5a-bucarest-vallee-cedric/">
<img src="http://www.gstatic.com/codesite/ph/images/defaultlogo.png" alt="Logo" itemprop="image">
</a>
</td>
<td style="padding-left: 0.5em">
<div id="pname">
<a href="/p/5a-bucarest-vallee-cedric/"><span itemprop="name">5a-bucarest-vallee-cedric</span></a>
</div>
<div id="psum">
<a id="project_summary_link"
href="/p/5a-bucarest-vallee-cedric/"><span itemprop="description">TP-Projet Erasmus Bucarest</span></a>
</div>
</td>
<td style="white-space:nowrap;text-align:right; vertical-align:bottom;">
<form action="/hosting/search">
<input size="30" name="q" value="" type="text">
<input type="submit" name="projectsearch" value="Search projects" >
</form>
</tr>
</table>
</div>
<div id="mt" class="gtb">
<a href="/p/5a-bucarest-vallee-cedric/" class="tab ">Project Home</a>
<a href="/p/5a-bucarest-vallee-cedric/downloads/list" class="tab ">Downloads</a>
<a href="/p/5a-bucarest-vallee-cedric/w/list" class="tab ">Wiki</a>
<a href="/p/5a-bucarest-vallee-cedric/issues/list"
class="tab ">Issues</a>
<a href="/p/5a-bucarest-vallee-cedric/source/checkout"
class="tab active">Source</a>
<a href="/p/5a-bucarest-vallee-cedric/admin"
class="tab inactive">Administer</a>
<div class=gtbc></div>
</div>
<table cellspacing="0" cellpadding="0" width="100%" align="center" border="0" class="st">
<tr>
<td class="subt">
<div class="st2">
<div class="isf">
<span class="inst1"><a href="/p/5a-bucarest-vallee-cedric/source/checkout">Checkout</a></span>
<span class="inst2"><a href="/p/5a-bucarest-vallee-cedric/source/browse/">Browse</a></span>
<span class="inst3"><a href="/p/5a-bucarest-vallee-cedric/source/list">Changes</a></span>
<form action="http://www.google.com/codesearch" method="get" style="display:inline"
onsubmit="document.getElementById('codesearchq').value = document.getElementById('origq').value + ' package:http://5a-bucarest-vallee-cedric\\.googlecode\\.com'">
<input type="hidden" name="q" id="codesearchq" value="">
<input type="text" maxlength="2048" size="38" id="origq" name="origq" value="" title="Google Code Search" style="font-size:92%"> <input type="submit" value="Search Trunk" name="btnG" style="font-size:92%">
<a href="/p/5a-bucarest-vallee-cedric/issues/entry?show=review&former=sourcelist">Request code review</a>
</form>
</div>
</div>
</td>
<td align="right" valign="top" class="bevel-right"></td>
</tr>
</table>
<script type="text/javascript">
var cancelBubble = false;
function _go(url) { document.location = url; }
</script>
<div id="maincol"
>
<!-- IE -->
<div class="expand">
<div id="colcontrol">
<style type="text/css">
#file_flipper { white-space: nowrap; padding-right: 2em; }
#file_flipper.hidden { display: none; }
#file_flipper .pagelink { color: #0000CC; text-decoration: underline; }
#file_flipper #visiblefiles { padding-left: 0.5em; padding-right: 0.5em; }
</style>
<table id="nav_and_rev" class="list"
cellpadding="0" cellspacing="0" width="100%">
<tr>
<td nowrap="nowrap" class="src_crumbs src_nav" width="33%">
<strong class="src_nav">Source path: </strong>
<span id="crumb_root">
<a href="/p/5a-bucarest-vallee-cedric/source/browse/">svn</a>/ </span>
<span id="crumb_links" class="ifClosed"><a href="/p/5a-bucarest-vallee-cedric/source/browse/trunk/">trunk</a><span class="sp">/ </span><a href="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/">nss</a><span class="sp">/ </span><a href="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/">homework</a><span class="sp">/ </span><a href="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/">exo2</a><span class="sp">/ </span>ids.py</span>
</td>
<td nowrap="nowrap" width="33%" align="center">
<a href="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/ids.py?edit=1"
><img src="http://www.gstatic.com/codesite/ph/images/pencil-y14.png"
class="edit_icon">Edit file</a>
</td>
<td nowrap="nowrap" width="33%" align="right">
<table cellpadding="0" cellspacing="0" style="font-size: 100%"><tr>
<td class="flipper">
<ul class="leftside">
<li><a href="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/ids.py?r=20" title="Previous">‹r20</a></li>
</ul>
</td>
<td class="flipper"><b>r22</b></td>
</tr></table>
</td>
</tr>
</table>
<div class="fc">
<style type="text/css">
.undermouse span {
background-image: url(http://www.gstatic.com/codesite/ph/images/comments.gif); }
</style>
<table class="opened" id="review_comment_area"
onmouseout="gutterOut()"><tr>
<td id="nums">
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
<pre><table width="100%" id="nums_table_0"><tr id="gr_svn22_1"
onmouseover="gutterOver(1)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',1);"> </span
></td><td id="1"><a href="#1">1</a></td></tr
><tr id="gr_svn22_2"
onmouseover="gutterOver(2)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',2);"> </span
></td><td id="2"><a href="#2">2</a></td></tr
><tr id="gr_svn22_3"
onmouseover="gutterOver(3)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',3);"> </span
></td><td id="3"><a href="#3">3</a></td></tr
><tr id="gr_svn22_4"
onmouseover="gutterOver(4)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',4);"> </span
></td><td id="4"><a href="#4">4</a></td></tr
><tr id="gr_svn22_5"
onmouseover="gutterOver(5)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',5);"> </span
></td><td id="5"><a href="#5">5</a></td></tr
><tr id="gr_svn22_6"
onmouseover="gutterOver(6)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',6);"> </span
></td><td id="6"><a href="#6">6</a></td></tr
><tr id="gr_svn22_7"
onmouseover="gutterOver(7)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',7);"> </span
></td><td id="7"><a href="#7">7</a></td></tr
><tr id="gr_svn22_8"
onmouseover="gutterOver(8)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',8);"> </span
></td><td id="8"><a href="#8">8</a></td></tr
><tr id="gr_svn22_9"
onmouseover="gutterOver(9)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',9);"> </span
></td><td id="9"><a href="#9">9</a></td></tr
><tr id="gr_svn22_10"
onmouseover="gutterOver(10)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',10);"> </span
></td><td id="10"><a href="#10">10</a></td></tr
><tr id="gr_svn22_11"
onmouseover="gutterOver(11)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',11);"> </span
></td><td id="11"><a href="#11">11</a></td></tr
><tr id="gr_svn22_12"
onmouseover="gutterOver(12)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',12);"> </span
></td><td id="12"><a href="#12">12</a></td></tr
><tr id="gr_svn22_13"
onmouseover="gutterOver(13)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',13);"> </span
></td><td id="13"><a href="#13">13</a></td></tr
><tr id="gr_svn22_14"
onmouseover="gutterOver(14)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',14);"> </span
></td><td id="14"><a href="#14">14</a></td></tr
><tr id="gr_svn22_15"
onmouseover="gutterOver(15)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',15);"> </span
></td><td id="15"><a href="#15">15</a></td></tr
><tr id="gr_svn22_16"
onmouseover="gutterOver(16)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',16);"> </span
></td><td id="16"><a href="#16">16</a></td></tr
><tr id="gr_svn22_17"
onmouseover="gutterOver(17)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',17);"> </span
></td><td id="17"><a href="#17">17</a></td></tr
><tr id="gr_svn22_18"
onmouseover="gutterOver(18)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',18);"> </span
></td><td id="18"><a href="#18">18</a></td></tr
><tr id="gr_svn22_19"
onmouseover="gutterOver(19)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',19);"> </span
></td><td id="19"><a href="#19">19</a></td></tr
><tr id="gr_svn22_20"
onmouseover="gutterOver(20)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',20);"> </span
></td><td id="20"><a href="#20">20</a></td></tr
><tr id="gr_svn22_21"
onmouseover="gutterOver(21)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',21);"> </span
></td><td id="21"><a href="#21">21</a></td></tr
><tr id="gr_svn22_22"
onmouseover="gutterOver(22)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',22);"> </span
></td><td id="22"><a href="#22">22</a></td></tr
><tr id="gr_svn22_23"
onmouseover="gutterOver(23)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',23);"> </span
></td><td id="23"><a href="#23">23</a></td></tr
><tr id="gr_svn22_24"
onmouseover="gutterOver(24)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',24);"> </span
></td><td id="24"><a href="#24">24</a></td></tr
><tr id="gr_svn22_25"
onmouseover="gutterOver(25)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',25);"> </span
></td><td id="25"><a href="#25">25</a></td></tr
><tr id="gr_svn22_26"
onmouseover="gutterOver(26)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',26);"> </span
></td><td id="26"><a href="#26">26</a></td></tr
><tr id="gr_svn22_27"
onmouseover="gutterOver(27)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',27);"> </span
></td><td id="27"><a href="#27">27</a></td></tr
><tr id="gr_svn22_28"
onmouseover="gutterOver(28)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',28);"> </span
></td><td id="28"><a href="#28">28</a></td></tr
><tr id="gr_svn22_29"
onmouseover="gutterOver(29)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',29);"> </span
></td><td id="29"><a href="#29">29</a></td></tr
><tr id="gr_svn22_30"
onmouseover="gutterOver(30)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',30);"> </span
></td><td id="30"><a href="#30">30</a></td></tr
><tr id="gr_svn22_31"
onmouseover="gutterOver(31)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',31);"> </span
></td><td id="31"><a href="#31">31</a></td></tr
><tr id="gr_svn22_32"
onmouseover="gutterOver(32)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',32);"> </span
></td><td id="32"><a href="#32">32</a></td></tr
><tr id="gr_svn22_33"
onmouseover="gutterOver(33)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',33);"> </span
></td><td id="33"><a href="#33">33</a></td></tr
><tr id="gr_svn22_34"
onmouseover="gutterOver(34)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',34);"> </span
></td><td id="34"><a href="#34">34</a></td></tr
><tr id="gr_svn22_35"
onmouseover="gutterOver(35)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',35);"> </span
></td><td id="35"><a href="#35">35</a></td></tr
><tr id="gr_svn22_36"
onmouseover="gutterOver(36)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',36);"> </span
></td><td id="36"><a href="#36">36</a></td></tr
><tr id="gr_svn22_37"
onmouseover="gutterOver(37)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',37);"> </span
></td><td id="37"><a href="#37">37</a></td></tr
><tr id="gr_svn22_38"
onmouseover="gutterOver(38)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',38);"> </span
></td><td id="38"><a href="#38">38</a></td></tr
><tr id="gr_svn22_39"
onmouseover="gutterOver(39)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',39);"> </span
></td><td id="39"><a href="#39">39</a></td></tr
><tr id="gr_svn22_40"
onmouseover="gutterOver(40)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',40);"> </span
></td><td id="40"><a href="#40">40</a></td></tr
><tr id="gr_svn22_41"
onmouseover="gutterOver(41)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',41);"> </span
></td><td id="41"><a href="#41">41</a></td></tr
><tr id="gr_svn22_42"
onmouseover="gutterOver(42)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',42);"> </span
></td><td id="42"><a href="#42">42</a></td></tr
><tr id="gr_svn22_43"
onmouseover="gutterOver(43)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',43);"> </span
></td><td id="43"><a href="#43">43</a></td></tr
><tr id="gr_svn22_44"
onmouseover="gutterOver(44)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',44);"> </span
></td><td id="44"><a href="#44">44</a></td></tr
><tr id="gr_svn22_45"
onmouseover="gutterOver(45)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',45);"> </span
></td><td id="45"><a href="#45">45</a></td></tr
><tr id="gr_svn22_46"
onmouseover="gutterOver(46)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',46);"> </span
></td><td id="46"><a href="#46">46</a></td></tr
><tr id="gr_svn22_47"
onmouseover="gutterOver(47)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',47);"> </span
></td><td id="47"><a href="#47">47</a></td></tr
><tr id="gr_svn22_48"
onmouseover="gutterOver(48)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',48);"> </span
></td><td id="48"><a href="#48">48</a></td></tr
><tr id="gr_svn22_49"
onmouseover="gutterOver(49)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',49);"> </span
></td><td id="49"><a href="#49">49</a></td></tr
><tr id="gr_svn22_50"
onmouseover="gutterOver(50)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',50);"> </span
></td><td id="50"><a href="#50">50</a></td></tr
><tr id="gr_svn22_51"
onmouseover="gutterOver(51)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',51);"> </span
></td><td id="51"><a href="#51">51</a></td></tr
><tr id="gr_svn22_52"
onmouseover="gutterOver(52)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',52);"> </span
></td><td id="52"><a href="#52">52</a></td></tr
><tr id="gr_svn22_53"
onmouseover="gutterOver(53)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',53);"> </span
></td><td id="53"><a href="#53">53</a></td></tr
><tr id="gr_svn22_54"
onmouseover="gutterOver(54)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',54);"> </span
></td><td id="54"><a href="#54">54</a></td></tr
><tr id="gr_svn22_55"
onmouseover="gutterOver(55)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',55);"> </span
></td><td id="55"><a href="#55">55</a></td></tr
><tr id="gr_svn22_56"
onmouseover="gutterOver(56)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',56);"> </span
></td><td id="56"><a href="#56">56</a></td></tr
><tr id="gr_svn22_57"
onmouseover="gutterOver(57)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',57);"> </span
></td><td id="57"><a href="#57">57</a></td></tr
><tr id="gr_svn22_58"
onmouseover="gutterOver(58)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',58);"> </span
></td><td id="58"><a href="#58">58</a></td></tr
><tr id="gr_svn22_59"
onmouseover="gutterOver(59)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',59);"> </span
></td><td id="59"><a href="#59">59</a></td></tr
><tr id="gr_svn22_60"
onmouseover="gutterOver(60)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',60);"> </span
></td><td id="60"><a href="#60">60</a></td></tr
><tr id="gr_svn22_61"
onmouseover="gutterOver(61)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',61);"> </span
></td><td id="61"><a href="#61">61</a></td></tr
><tr id="gr_svn22_62"
onmouseover="gutterOver(62)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',62);"> </span
></td><td id="62"><a href="#62">62</a></td></tr
><tr id="gr_svn22_63"
onmouseover="gutterOver(63)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',63);"> </span
></td><td id="63"><a href="#63">63</a></td></tr
><tr id="gr_svn22_64"
onmouseover="gutterOver(64)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',64);"> </span
></td><td id="64"><a href="#64">64</a></td></tr
><tr id="gr_svn22_65"
onmouseover="gutterOver(65)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',65);"> </span
></td><td id="65"><a href="#65">65</a></td></tr
><tr id="gr_svn22_66"
onmouseover="gutterOver(66)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',66);"> </span
></td><td id="66"><a href="#66">66</a></td></tr
><tr id="gr_svn22_67"
onmouseover="gutterOver(67)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',67);"> </span
></td><td id="67"><a href="#67">67</a></td></tr
><tr id="gr_svn22_68"
onmouseover="gutterOver(68)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',68);"> </span
></td><td id="68"><a href="#68">68</a></td></tr
><tr id="gr_svn22_69"
onmouseover="gutterOver(69)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',69);"> </span
></td><td id="69"><a href="#69">69</a></td></tr
><tr id="gr_svn22_70"
onmouseover="gutterOver(70)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',70);"> </span
></td><td id="70"><a href="#70">70</a></td></tr
><tr id="gr_svn22_71"
onmouseover="gutterOver(71)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',71);"> </span
></td><td id="71"><a href="#71">71</a></td></tr
><tr id="gr_svn22_72"
onmouseover="gutterOver(72)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',72);"> </span
></td><td id="72"><a href="#72">72</a></td></tr
><tr id="gr_svn22_73"
onmouseover="gutterOver(73)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',73);"> </span
></td><td id="73"><a href="#73">73</a></td></tr
><tr id="gr_svn22_74"
onmouseover="gutterOver(74)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',74);"> </span
></td><td id="74"><a href="#74">74</a></td></tr
><tr id="gr_svn22_75"
onmouseover="gutterOver(75)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',75);"> </span
></td><td id="75"><a href="#75">75</a></td></tr
><tr id="gr_svn22_76"
onmouseover="gutterOver(76)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',76);"> </span
></td><td id="76"><a href="#76">76</a></td></tr
><tr id="gr_svn22_77"
onmouseover="gutterOver(77)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',77);"> </span
></td><td id="77"><a href="#77">77</a></td></tr
><tr id="gr_svn22_78"
onmouseover="gutterOver(78)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',78);"> </span
></td><td id="78"><a href="#78">78</a></td></tr
><tr id="gr_svn22_79"
onmouseover="gutterOver(79)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',79);"> </span
></td><td id="79"><a href="#79">79</a></td></tr
><tr id="gr_svn22_80"
onmouseover="gutterOver(80)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',80);"> </span
></td><td id="80"><a href="#80">80</a></td></tr
><tr id="gr_svn22_81"
onmouseover="gutterOver(81)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',81);"> </span
></td><td id="81"><a href="#81">81</a></td></tr
><tr id="gr_svn22_82"
onmouseover="gutterOver(82)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',82);"> </span
></td><td id="82"><a href="#82">82</a></td></tr
><tr id="gr_svn22_83"
onmouseover="gutterOver(83)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',83);"> </span
></td><td id="83"><a href="#83">83</a></td></tr
><tr id="gr_svn22_84"
onmouseover="gutterOver(84)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',84);"> </span
></td><td id="84"><a href="#84">84</a></td></tr
><tr id="gr_svn22_85"
onmouseover="gutterOver(85)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',85);"> </span
></td><td id="85"><a href="#85">85</a></td></tr
><tr id="gr_svn22_86"
onmouseover="gutterOver(86)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',86);"> </span
></td><td id="86"><a href="#86">86</a></td></tr
><tr id="gr_svn22_87"
onmouseover="gutterOver(87)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',87);"> </span
></td><td id="87"><a href="#87">87</a></td></tr
><tr id="gr_svn22_88"
onmouseover="gutterOver(88)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',88);"> </span
></td><td id="88"><a href="#88">88</a></td></tr
><tr id="gr_svn22_89"
onmouseover="gutterOver(89)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',89);"> </span
></td><td id="89"><a href="#89">89</a></td></tr
><tr id="gr_svn22_90"
onmouseover="gutterOver(90)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',90);"> </span
></td><td id="90"><a href="#90">90</a></td></tr
><tr id="gr_svn22_91"
onmouseover="gutterOver(91)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',91);"> </span
></td><td id="91"><a href="#91">91</a></td></tr
><tr id="gr_svn22_92"
onmouseover="gutterOver(92)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',92);"> </span
></td><td id="92"><a href="#92">92</a></td></tr
><tr id="gr_svn22_93"
onmouseover="gutterOver(93)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',93);"> </span
></td><td id="93"><a href="#93">93</a></td></tr
><tr id="gr_svn22_94"
onmouseover="gutterOver(94)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',94);"> </span
></td><td id="94"><a href="#94">94</a></td></tr
><tr id="gr_svn22_95"
onmouseover="gutterOver(95)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',95);"> </span
></td><td id="95"><a href="#95">95</a></td></tr
><tr id="gr_svn22_96"
onmouseover="gutterOver(96)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',96);"> </span
></td><td id="96"><a href="#96">96</a></td></tr
><tr id="gr_svn22_97"
onmouseover="gutterOver(97)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',97);"> </span
></td><td id="97"><a href="#97">97</a></td></tr
><tr id="gr_svn22_98"
onmouseover="gutterOver(98)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',98);"> </span
></td><td id="98"><a href="#98">98</a></td></tr
><tr id="gr_svn22_99"
onmouseover="gutterOver(99)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',99);"> </span
></td><td id="99"><a href="#99">99</a></td></tr
><tr id="gr_svn22_100"
onmouseover="gutterOver(100)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',100);"> </span
></td><td id="100"><a href="#100">100</a></td></tr
><tr id="gr_svn22_101"
onmouseover="gutterOver(101)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',101);"> </span
></td><td id="101"><a href="#101">101</a></td></tr
><tr id="gr_svn22_102"
onmouseover="gutterOver(102)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',102);"> </span
></td><td id="102"><a href="#102">102</a></td></tr
><tr id="gr_svn22_103"
onmouseover="gutterOver(103)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',103);"> </span
></td><td id="103"><a href="#103">103</a></td></tr
><tr id="gr_svn22_104"
onmouseover="gutterOver(104)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',104);"> </span
></td><td id="104"><a href="#104">104</a></td></tr
><tr id="gr_svn22_105"
onmouseover="gutterOver(105)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',105);"> </span
></td><td id="105"><a href="#105">105</a></td></tr
><tr id="gr_svn22_106"
onmouseover="gutterOver(106)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',106);"> </span
></td><td id="106"><a href="#106">106</a></td></tr
><tr id="gr_svn22_107"
onmouseover="gutterOver(107)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',107);"> </span
></td><td id="107"><a href="#107">107</a></td></tr
><tr id="gr_svn22_108"
onmouseover="gutterOver(108)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',108);"> </span
></td><td id="108"><a href="#108">108</a></td></tr
><tr id="gr_svn22_109"
onmouseover="gutterOver(109)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',109);"> </span
></td><td id="109"><a href="#109">109</a></td></tr
><tr id="gr_svn22_110"
onmouseover="gutterOver(110)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',110);"> </span
></td><td id="110"><a href="#110">110</a></td></tr
><tr id="gr_svn22_111"
onmouseover="gutterOver(111)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',111);"> </span
></td><td id="111"><a href="#111">111</a></td></tr
><tr id="gr_svn22_112"
onmouseover="gutterOver(112)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',112);"> </span
></td><td id="112"><a href="#112">112</a></td></tr
><tr id="gr_svn22_113"
onmouseover="gutterOver(113)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',113);"> </span
></td><td id="113"><a href="#113">113</a></td></tr
><tr id="gr_svn22_114"
onmouseover="gutterOver(114)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',114);"> </span
></td><td id="114"><a href="#114">114</a></td></tr
><tr id="gr_svn22_115"
onmouseover="gutterOver(115)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',115);"> </span
></td><td id="115"><a href="#115">115</a></td></tr
><tr id="gr_svn22_116"
onmouseover="gutterOver(116)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',116);"> </span
></td><td id="116"><a href="#116">116</a></td></tr
><tr id="gr_svn22_117"
onmouseover="gutterOver(117)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',117);"> </span
></td><td id="117"><a href="#117">117</a></td></tr
><tr id="gr_svn22_118"
onmouseover="gutterOver(118)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',118);"> </span
></td><td id="118"><a href="#118">118</a></td></tr
><tr id="gr_svn22_119"
onmouseover="gutterOver(119)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',119);"> </span
></td><td id="119"><a href="#119">119</a></td></tr
><tr id="gr_svn22_120"
onmouseover="gutterOver(120)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',120);"> </span
></td><td id="120"><a href="#120">120</a></td></tr
><tr id="gr_svn22_121"
onmouseover="gutterOver(121)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',121);"> </span
></td><td id="121"><a href="#121">121</a></td></tr
><tr id="gr_svn22_122"
onmouseover="gutterOver(122)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',122);"> </span
></td><td id="122"><a href="#122">122</a></td></tr
><tr id="gr_svn22_123"
onmouseover="gutterOver(123)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',123);"> </span
></td><td id="123"><a href="#123">123</a></td></tr
><tr id="gr_svn22_124"
onmouseover="gutterOver(124)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',124);"> </span
></td><td id="124"><a href="#124">124</a></td></tr
><tr id="gr_svn22_125"
onmouseover="gutterOver(125)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',125);"> </span
></td><td id="125"><a href="#125">125</a></td></tr
><tr id="gr_svn22_126"
onmouseover="gutterOver(126)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',126);"> </span
></td><td id="126"><a href="#126">126</a></td></tr
><tr id="gr_svn22_127"
onmouseover="gutterOver(127)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',127);"> </span
></td><td id="127"><a href="#127">127</a></td></tr
><tr id="gr_svn22_128"
onmouseover="gutterOver(128)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',128);"> </span
></td><td id="128"><a href="#128">128</a></td></tr
><tr id="gr_svn22_129"
onmouseover="gutterOver(129)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',129);"> </span
></td><td id="129"><a href="#129">129</a></td></tr
><tr id="gr_svn22_130"
onmouseover="gutterOver(130)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',130);"> </span
></td><td id="130"><a href="#130">130</a></td></tr
><tr id="gr_svn22_131"
onmouseover="gutterOver(131)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',131);"> </span
></td><td id="131"><a href="#131">131</a></td></tr
><tr id="gr_svn22_132"
onmouseover="gutterOver(132)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',132);"> </span
></td><td id="132"><a href="#132">132</a></td></tr
><tr id="gr_svn22_133"
onmouseover="gutterOver(133)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',133);"> </span
></td><td id="133"><a href="#133">133</a></td></tr
><tr id="gr_svn22_134"
onmouseover="gutterOver(134)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',134);"> </span
></td><td id="134"><a href="#134">134</a></td></tr
><tr id="gr_svn22_135"
onmouseover="gutterOver(135)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',135);"> </span
></td><td id="135"><a href="#135">135</a></td></tr
><tr id="gr_svn22_136"
onmouseover="gutterOver(136)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',136);"> </span
></td><td id="136"><a href="#136">136</a></td></tr
><tr id="gr_svn22_137"
onmouseover="gutterOver(137)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',137);"> </span
></td><td id="137"><a href="#137">137</a></td></tr
><tr id="gr_svn22_138"
onmouseover="gutterOver(138)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',138);"> </span
></td><td id="138"><a href="#138">138</a></td></tr
><tr id="gr_svn22_139"
onmouseover="gutterOver(139)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',139);"> </span
></td><td id="139"><a href="#139">139</a></td></tr
><tr id="gr_svn22_140"
onmouseover="gutterOver(140)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',140);"> </span
></td><td id="140"><a href="#140">140</a></td></tr
><tr id="gr_svn22_141"
onmouseover="gutterOver(141)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',141);"> </span
></td><td id="141"><a href="#141">141</a></td></tr
><tr id="gr_svn22_142"
onmouseover="gutterOver(142)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',142);"> </span
></td><td id="142"><a href="#142">142</a></td></tr
><tr id="gr_svn22_143"
onmouseover="gutterOver(143)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',143);"> </span
></td><td id="143"><a href="#143">143</a></td></tr
><tr id="gr_svn22_144"
onmouseover="gutterOver(144)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',144);"> </span
></td><td id="144"><a href="#144">144</a></td></tr
><tr id="gr_svn22_145"
onmouseover="gutterOver(145)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',145);"> </span
></td><td id="145"><a href="#145">145</a></td></tr
><tr id="gr_svn22_146"
onmouseover="gutterOver(146)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',146);"> </span
></td><td id="146"><a href="#146">146</a></td></tr
><tr id="gr_svn22_147"
onmouseover="gutterOver(147)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',147);"> </span
></td><td id="147"><a href="#147">147</a></td></tr
><tr id="gr_svn22_148"
onmouseover="gutterOver(148)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',148);"> </span
></td><td id="148"><a href="#148">148</a></td></tr
><tr id="gr_svn22_149"
onmouseover="gutterOver(149)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',149);"> </span
></td><td id="149"><a href="#149">149</a></td></tr
><tr id="gr_svn22_150"
onmouseover="gutterOver(150)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',150);"> </span
></td><td id="150"><a href="#150">150</a></td></tr
><tr id="gr_svn22_151"
onmouseover="gutterOver(151)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',151);"> </span
></td><td id="151"><a href="#151">151</a></td></tr
><tr id="gr_svn22_152"
onmouseover="gutterOver(152)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',152);"> </span
></td><td id="152"><a href="#152">152</a></td></tr
><tr id="gr_svn22_153"
onmouseover="gutterOver(153)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',153);"> </span
></td><td id="153"><a href="#153">153</a></td></tr
><tr id="gr_svn22_154"
onmouseover="gutterOver(154)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',154);"> </span
></td><td id="154"><a href="#154">154</a></td></tr
><tr id="gr_svn22_155"
onmouseover="gutterOver(155)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',155);"> </span
></td><td id="155"><a href="#155">155</a></td></tr
><tr id="gr_svn22_156"
onmouseover="gutterOver(156)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',156);"> </span
></td><td id="156"><a href="#156">156</a></td></tr
><tr id="gr_svn22_157"
onmouseover="gutterOver(157)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',157);"> </span
></td><td id="157"><a href="#157">157</a></td></tr
><tr id="gr_svn22_158"
onmouseover="gutterOver(158)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',158);"> </span
></td><td id="158"><a href="#158">158</a></td></tr
><tr id="gr_svn22_159"
onmouseover="gutterOver(159)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',159);"> </span
></td><td id="159"><a href="#159">159</a></td></tr
><tr id="gr_svn22_160"
onmouseover="gutterOver(160)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',160);"> </span
></td><td id="160"><a href="#160">160</a></td></tr
><tr id="gr_svn22_161"
onmouseover="gutterOver(161)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',161);"> </span
></td><td id="161"><a href="#161">161</a></td></tr
><tr id="gr_svn22_162"
onmouseover="gutterOver(162)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',162);"> </span
></td><td id="162"><a href="#162">162</a></td></tr
><tr id="gr_svn22_163"
onmouseover="gutterOver(163)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',163);"> </span
></td><td id="163"><a href="#163">163</a></td></tr
><tr id="gr_svn22_164"
onmouseover="gutterOver(164)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',164);"> </span
></td><td id="164"><a href="#164">164</a></td></tr
><tr id="gr_svn22_165"
onmouseover="gutterOver(165)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',165);"> </span
></td><td id="165"><a href="#165">165</a></td></tr
><tr id="gr_svn22_166"
onmouseover="gutterOver(166)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',166);"> </span
></td><td id="166"><a href="#166">166</a></td></tr
><tr id="gr_svn22_167"
onmouseover="gutterOver(167)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',167);"> </span
></td><td id="167"><a href="#167">167</a></td></tr
><tr id="gr_svn22_168"
onmouseover="gutterOver(168)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',168);"> </span
></td><td id="168"><a href="#168">168</a></td></tr
><tr id="gr_svn22_169"
onmouseover="gutterOver(169)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',169);"> </span
></td><td id="169"><a href="#169">169</a></td></tr
><tr id="gr_svn22_170"
onmouseover="gutterOver(170)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',170);"> </span
></td><td id="170"><a href="#170">170</a></td></tr
><tr id="gr_svn22_171"
onmouseover="gutterOver(171)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',171);"> </span
></td><td id="171"><a href="#171">171</a></td></tr
><tr id="gr_svn22_172"
onmouseover="gutterOver(172)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',172);"> </span
></td><td id="172"><a href="#172">172</a></td></tr
><tr id="gr_svn22_173"
onmouseover="gutterOver(173)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',173);"> </span
></td><td id="173"><a href="#173">173</a></td></tr
><tr id="gr_svn22_174"
onmouseover="gutterOver(174)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',174);"> </span
></td><td id="174"><a href="#174">174</a></td></tr
><tr id="gr_svn22_175"
onmouseover="gutterOver(175)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',175);"> </span
></td><td id="175"><a href="#175">175</a></td></tr
><tr id="gr_svn22_176"
onmouseover="gutterOver(176)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',176);"> </span
></td><td id="176"><a href="#176">176</a></td></tr
><tr id="gr_svn22_177"
onmouseover="gutterOver(177)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',177);"> </span
></td><td id="177"><a href="#177">177</a></td></tr
><tr id="gr_svn22_178"
onmouseover="gutterOver(178)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',178);"> </span
></td><td id="178"><a href="#178">178</a></td></tr
><tr id="gr_svn22_179"
onmouseover="gutterOver(179)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',179);"> </span
></td><td id="179"><a href="#179">179</a></td></tr
><tr id="gr_svn22_180"
onmouseover="gutterOver(180)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',180);"> </span
></td><td id="180"><a href="#180">180</a></td></tr
><tr id="gr_svn22_181"
onmouseover="gutterOver(181)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',181);"> </span
></td><td id="181"><a href="#181">181</a></td></tr
><tr id="gr_svn22_182"
onmouseover="gutterOver(182)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',182);"> </span
></td><td id="182"><a href="#182">182</a></td></tr
><tr id="gr_svn22_183"
onmouseover="gutterOver(183)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',183);"> </span
></td><td id="183"><a href="#183">183</a></td></tr
><tr id="gr_svn22_184"
onmouseover="gutterOver(184)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',184);"> </span
></td><td id="184"><a href="#184">184</a></td></tr
><tr id="gr_svn22_185"
onmouseover="gutterOver(185)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',185);"> </span
></td><td id="185"><a href="#185">185</a></td></tr
><tr id="gr_svn22_186"
onmouseover="gutterOver(186)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',186);"> </span
></td><td id="186"><a href="#186">186</a></td></tr
><tr id="gr_svn22_187"
onmouseover="gutterOver(187)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',187);"> </span
></td><td id="187"><a href="#187">187</a></td></tr
><tr id="gr_svn22_188"
onmouseover="gutterOver(188)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',188);"> </span
></td><td id="188"><a href="#188">188</a></td></tr
><tr id="gr_svn22_189"
onmouseover="gutterOver(189)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',189);"> </span
></td><td id="189"><a href="#189">189</a></td></tr
><tr id="gr_svn22_190"
onmouseover="gutterOver(190)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',190);"> </span
></td><td id="190"><a href="#190">190</a></td></tr
><tr id="gr_svn22_191"
onmouseover="gutterOver(191)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',191);"> </span
></td><td id="191"><a href="#191">191</a></td></tr
><tr id="gr_svn22_192"
onmouseover="gutterOver(192)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',192);"> </span
></td><td id="192"><a href="#192">192</a></td></tr
><tr id="gr_svn22_193"
onmouseover="gutterOver(193)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',193);"> </span
></td><td id="193"><a href="#193">193</a></td></tr
><tr id="gr_svn22_194"
onmouseover="gutterOver(194)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',194);"> </span
></td><td id="194"><a href="#194">194</a></td></tr
><tr id="gr_svn22_195"
onmouseover="gutterOver(195)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',195);"> </span
></td><td id="195"><a href="#195">195</a></td></tr
><tr id="gr_svn22_196"
onmouseover="gutterOver(196)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',196);"> </span
></td><td id="196"><a href="#196">196</a></td></tr
><tr id="gr_svn22_197"
onmouseover="gutterOver(197)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',197);"> </span
></td><td id="197"><a href="#197">197</a></td></tr
><tr id="gr_svn22_198"
onmouseover="gutterOver(198)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',198);"> </span
></td><td id="198"><a href="#198">198</a></td></tr
><tr id="gr_svn22_199"
onmouseover="gutterOver(199)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',199);"> </span
></td><td id="199"><a href="#199">199</a></td></tr
><tr id="gr_svn22_200"
onmouseover="gutterOver(200)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',200);"> </span
></td><td id="200"><a href="#200">200</a></td></tr
><tr id="gr_svn22_201"
onmouseover="gutterOver(201)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',201);"> </span
></td><td id="201"><a href="#201">201</a></td></tr
><tr id="gr_svn22_202"
onmouseover="gutterOver(202)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',202);"> </span
></td><td id="202"><a href="#202">202</a></td></tr
><tr id="gr_svn22_203"
onmouseover="gutterOver(203)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',203);"> </span
></td><td id="203"><a href="#203">203</a></td></tr
><tr id="gr_svn22_204"
onmouseover="gutterOver(204)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',204);"> </span
></td><td id="204"><a href="#204">204</a></td></tr
><tr id="gr_svn22_205"
onmouseover="gutterOver(205)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',205);"> </span
></td><td id="205"><a href="#205">205</a></td></tr
><tr id="gr_svn22_206"
onmouseover="gutterOver(206)"
><td><span title="Add comment" onclick="codereviews.startEdit('svn22',206);"> </span
></td><td id="206"><a href="#206">206</a></td></tr
></table></pre>
<pre><table width="100%"><tr class="nocursor"><td></td></tr></table></pre>
</td>
<td id="lines">
<pre><table width="100%"><tr class="cursor_stop cursor_hidden"><td></td></tr></table></pre>
<pre class="prettyprint lang-py"><table id="src_table_0"><tr
id=sl_svn22_1
onmouseover="gutterOver(1)"
><td class="source">#####################################################<br></td></tr
><tr
id=sl_svn22_2
onmouseover="gutterOver(2)"
><td class="source"># Python Watch and Warn Files Modification script #<br></td></tr
><tr
id=sl_svn22_3
onmouseover="gutterOver(3)"
><td class="source">#####################################################<br></td></tr
><tr
id=sl_svn22_4
onmouseover="gutterOver(4)"
><td class="source"># Vallee Cedric #<br></td></tr
><tr
id=sl_svn22_5
onmouseover="gutterOver(5)"
><td class="source"># 20/12/2011 #<br></td></tr
><tr
id=sl_svn22_6
onmouseover="gutterOver(6)"
><td class="source"># Network and Systems Security #<br></td></tr
><tr
id=sl_svn22_7
onmouseover="gutterOver(7)"
><td class="source">#####################################################<br></td></tr
><tr
id=sl_svn22_8
onmouseover="gutterOver(8)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_9
onmouseover="gutterOver(9)"
><td class="source">import re<br></td></tr
><tr
id=sl_svn22_10
onmouseover="gutterOver(10)"
><td class="source">import subprocess<br></td></tr
><tr
id=sl_svn22_11
onmouseover="gutterOver(11)"
><td class="source">import os<br></td></tr
><tr
id=sl_svn22_12
onmouseover="gutterOver(12)"
><td class="source">import hashlib<br></td></tr
><tr
id=sl_svn22_13
onmouseover="gutterOver(13)"
><td class="source">import smtplib<br></td></tr
><tr
id=sl_svn22_14
onmouseover="gutterOver(14)"
><td class="source">from email.MIMEText import MIMEText<br></td></tr
><tr
id=sl_svn22_15
onmouseover="gutterOver(15)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_16
onmouseover="gutterOver(16)"
><td class="source">#General analyser class : manage the mail sending <br></td></tr
><tr
id=sl_svn22_17
onmouseover="gutterOver(17)"
><td class="source">class Analyser:<br></td></tr
><tr
id=sl_svn22_18
onmouseover="gutterOver(18)"
><td class="source"> #Constructor<br></td></tr
><tr
id=sl_svn22_19
onmouseover="gutterOver(19)"
><td class="source"> #mail : where the mail will be sent<br></td></tr
><tr
id=sl_svn22_20
onmouseover="gutterOver(20)"
><td class="source"> #dbpath : db location<br></td></tr
><tr
id=sl_svn22_21
onmouseover="gutterOver(21)"
><td class="source"> #fileList : List of files to analyse, ids.py and ids.cfg are automatically added<br></td></tr
><tr
id=sl_svn22_22
onmouseover="gutterOver(22)"
><td class="source"> def __init__(self, mail, dbpath,fileList):<br></td></tr
><tr
id=sl_svn22_23
onmouseover="gutterOver(23)"
><td class="source"> self.mail = mail<br></td></tr
><tr
id=sl_svn22_24
onmouseover="gutterOver(24)"
><td class="source"> self.dbpath = dbpath<br></td></tr
><tr
id=sl_svn22_25
onmouseover="gutterOver(25)"
><td class="source"> self.fileList = fileList<br></td></tr
><tr
id=sl_svn22_26
onmouseover="gutterOver(26)"
><td class="source"> self.fileList.append("ids.cfg")<br></td></tr
><tr
id=sl_svn22_27
onmouseover="gutterOver(27)"
><td class="source"> self.fileList.append("ids.py")<br></td></tr
><tr
id=sl_svn22_28
onmouseover="gutterOver(28)"
><td class="source"> #report: string which store the mail which will be sent<br></td></tr
><tr
id=sl_svn22_29
onmouseover="gutterOver(29)"
><td class="source"> self.report=""<br></td></tr
><tr
id=sl_svn22_30
onmouseover="gutterOver(30)"
><td class="source"> self.run()<br></td></tr
><tr
id=sl_svn22_31
onmouseover="gutterOver(31)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_32
onmouseover="gutterOver(32)"
><td class="source"> #send the report to the self.mail<br></td></tr
><tr
id=sl_svn22_33
onmouseover="gutterOver(33)"
><td class="source"> def sendMail(self):<br></td></tr
><tr
id=sl_svn22_34
onmouseover="gutterOver(34)"
><td class="source"> if self.report != "":<br></td></tr
><tr
id=sl_svn22_35
onmouseover="gutterOver(35)"
><td class="source"> #header <br></td></tr
><tr
id=sl_svn22_36
onmouseover="gutterOver(36)"
><td class="source"> self.report="\tAutomatic report from ids.py script\n"+ self.report<br></td></tr
><tr
id=sl_svn22_37
onmouseover="gutterOver(37)"
><td class="source"> print self.report<br></td></tr
><tr
id=sl_svn22_38
onmouseover="gutterOver(38)"
><td class="source"> fromaddr = 'cvallee.insa@gmail.com' <br></td></tr
><tr
id=sl_svn22_39
onmouseover="gutterOver(39)"
><td class="source"> toaddrs = self.mail <br></td></tr
><tr
id=sl_svn22_40
onmouseover="gutterOver(40)"
><td class="source"> msg = self.report <br></td></tr
><tr
id=sl_svn22_41
onmouseover="gutterOver(41)"
><td class="source"> <br></td></tr
><tr
id=sl_svn22_42
onmouseover="gutterOver(42)"
><td class="source"> # Credentials for connecting on the google spam account <br></td></tr
><tr
id=sl_svn22_43
onmouseover="gutterOver(43)"
><td class="source"> username = 'nss.automatic.message@gmail.com' <br></td></tr
><tr
id=sl_svn22_44
onmouseover="gutterOver(44)"
><td class="source"> password = 'ThisIsMyPassword' <br></td></tr
><tr
id=sl_svn22_45
onmouseover="gutterOver(45)"
><td class="source"> <br></td></tr
><tr
id=sl_svn22_46
onmouseover="gutterOver(46)"
><td class="source"> # The actual mail send<br></td></tr
><tr
id=sl_svn22_47
onmouseover="gutterOver(47)"
><td class="source"> server = smtplib.SMTP('smtp.gmail.com:587') <br></td></tr
><tr
id=sl_svn22_48
onmouseover="gutterOver(48)"
><td class="source"> server.starttls() <br></td></tr
><tr
id=sl_svn22_49
onmouseover="gutterOver(49)"
><td class="source"> server.login(username,password) <br></td></tr
><tr
id=sl_svn22_50
onmouseover="gutterOver(50)"
><td class="source"> server.sendmail(fromaddr, toaddrs, msg) <br></td></tr
><tr
id=sl_svn22_51
onmouseover="gutterOver(51)"
><td class="source"> server.quit()<br></td></tr
><tr
id=sl_svn22_52
onmouseover="gutterOver(52)"
><td class="source"> self.report=""<br></td></tr
><tr
id=sl_svn22_53
onmouseover="gutterOver(53)"
><td class="source"> else:<br></td></tr
><tr
id=sl_svn22_54
onmouseover="gutterOver(54)"
><td class="source"> print "No modification"<br></td></tr
><tr
id=sl_svn22_55
onmouseover="gutterOver(55)"
><td class="source"> #the main method <br></td></tr
><tr
id=sl_svn22_56
onmouseover="gutterOver(56)"
><td class="source"> def Run(self):<br></td></tr
><tr
id=sl_svn22_57
onmouseover="gutterOver(57)"
><td class="source"> pass<br></td></tr
><tr
id=sl_svn22_58
onmouseover="gutterOver(58)"
><td class="source"> #database constructor<br></td></tr
><tr
id=sl_svn22_59
onmouseover="gutterOver(59)"
><td class="source"> def create(self):<br></td></tr
><tr
id=sl_svn22_60
onmouseover="gutterOver(60)"
><td class="source"> pass<br></td></tr
><tr
id=sl_svn22_61
onmouseover="gutterOver(61)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_62
onmouseover="gutterOver(62)"
><td class="source">#RpmAnalyser which is not yet implemented<br></td></tr
><tr
id=sl_svn22_63
onmouseover="gutterOver(63)"
><td class="source">class RpmAnalyser(Analyser):<br></td></tr
><tr
id=sl_svn22_64
onmouseover="gutterOver(64)"
><td class="source"> def Run(self):<br></td></tr
><tr
id=sl_svn22_65
onmouseover="gutterOver(65)"
><td class="source"> pass<br></td></tr
><tr
id=sl_svn22_66
onmouseover="gutterOver(66)"
><td class="source"> def Create(self):<br></td></tr
><tr
id=sl_svn22_67
onmouseover="gutterOver(67)"
><td class="source"> pass<br></td></tr
><tr
id=sl_svn22_68
onmouseover="gutterOver(68)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_69
onmouseover="gutterOver(69)"
><td class="source">#Analyser using the md5 hashcode for matching files<br></td></tr
><tr
id=sl_svn22_70
onmouseover="gutterOver(70)"
><td class="source">class MdAnalyser(Analyser):<br></td></tr
><tr
id=sl_svn22_71
onmouseover="gutterOver(71)"
><td class="source"> def create(self):<br></td></tr
><tr
id=sl_svn22_72
onmouseover="gutterOver(72)"
><td class="source"> #open the self.dbpath file in writing mode<br></td></tr
><tr
id=sl_svn22_73
onmouseover="gutterOver(73)"
><td class="source"> db= open(self.dbpath,'w')<br></td></tr
><tr
id=sl_svn22_74
onmouseover="gutterOver(74)"
><td class="source"> for i in self.fileList:<br></td></tr
><tr
id=sl_svn22_75
onmouseover="gutterOver(75)"
><td class="source"> try:<br></td></tr
><tr
id=sl_svn22_76
onmouseover="gutterOver(76)"
><td class="source"> #info=os.stat(i) brute information(same as ls)<br></td></tr
><tr
id=sl_svn22_77
onmouseover="gutterOver(77)"
><td class="source"> #if i is a directory then we compute the md5 of the ls -lia i string<br></td></tr
><tr
id=sl_svn22_78
onmouseover="gutterOver(78)"
><td class="source"> #if it's a file then we compute directly his md5<br></td></tr
><tr
id=sl_svn22_79
onmouseover="gutterOver(79)"
><td class="source"> if os.path.isdir(i):<br></td></tr
><tr
id=sl_svn22_80
onmouseover="gutterOver(80)"
><td class="source"> ls = subprocess.Popen(<br></td></tr
><tr
id=sl_svn22_81
onmouseover="gutterOver(81)"
><td class="source"> ["ls", i],<br></td></tr
><tr
id=sl_svn22_82
onmouseover="gutterOver(82)"
><td class="source"> stdout = subprocess.PIPE,<br></td></tr
><tr
id=sl_svn22_83
onmouseover="gutterOver(83)"
><td class="source"> stderr = subprocess.PIPE<br></td></tr
><tr
id=sl_svn22_84
onmouseover="gutterOver(84)"
><td class="source"> )<br></td></tr
><tr
id=sl_svn22_85
onmouseover="gutterOver(85)"
><td class="source"> out , error = ls.communicate()<br></td></tr
><tr
id=sl_svn22_86
onmouseover="gutterOver(86)"
><td class="source"> m=hashlib.md5(out)<br></td></tr
><tr
id=sl_svn22_87
onmouseover="gutterOver(87)"
><td class="source"> hashTmp=m.hexdigest()+" "+i+"\n"<br></td></tr
><tr
id=sl_svn22_88
onmouseover="gutterOver(88)"
><td class="source"> else:<br></td></tr
><tr
id=sl_svn22_89
onmouseover="gutterOver(89)"
><td class="source"> md5sum = subprocess.Popen(<br></td></tr
><tr
id=sl_svn22_90
onmouseover="gutterOver(90)"
><td class="source"> ["md5sum", i],<br></td></tr
><tr
id=sl_svn22_91
onmouseover="gutterOver(91)"
><td class="source"> stdout = subprocess.PIPE,<br></td></tr
><tr
id=sl_svn22_92
onmouseover="gutterOver(92)"
><td class="source"> stderr = subprocess.PIPE<br></td></tr
><tr
id=sl_svn22_93
onmouseover="gutterOver(93)"
><td class="source"> )<br></td></tr
><tr
id=sl_svn22_94
onmouseover="gutterOver(94)"
><td class="source"> hashTmp, error = md5sum.communicate()<br></td></tr
><tr
id=sl_svn22_95
onmouseover="gutterOver(95)"
><td class="source"> #write it in the db file<br></td></tr
><tr
id=sl_svn22_96
onmouseover="gutterOver(96)"
><td class="source"> db.write(hashTmp)<br></td></tr
><tr
id=sl_svn22_97
onmouseover="gutterOver(97)"
><td class="source"> except:<br></td></tr
><tr
id=sl_svn22_98
onmouseover="gutterOver(98)"
><td class="source"> self.report+= "The %s file is missing\n"%i<br></td></tr
><tr
id=sl_svn22_99
onmouseover="gutterOver(99)"
><td class="source"> #Report the creation<br></td></tr
><tr
id=sl_svn22_100
onmouseover="gutterOver(100)"
><td class="source"> self.report+="The database has been created"<br></td></tr
><tr
id=sl_svn22_101
onmouseover="gutterOver(101)"
><td class="source"> db.close()<br></td></tr
><tr
id=sl_svn22_102
onmouseover="gutterOver(102)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_103
onmouseover="gutterOver(103)"
><td class="source"> def run(self):<br></td></tr
><tr
id=sl_svn22_104
onmouseover="gutterOver(104)"
><td class="source"> dbmodified=False<br></td></tr
><tr
id=sl_svn22_105
onmouseover="gutterOver(105)"
><td class="source"> if not os.path.exists(self.dbpath):<br></td></tr
><tr
id=sl_svn22_106
onmouseover="gutterOver(106)"
><td class="source"> #if the db file doesn't exist then we create it<br></td></tr
><tr
id=sl_svn22_107
onmouseover="gutterOver(107)"
><td class="source"> self.create()<br></td></tr
><tr
id=sl_svn22_108
onmouseover="gutterOver(108)"
><td class="source"> else: <br></td></tr
><tr
id=sl_svn22_109
onmouseover="gutterOver(109)"
><td class="source"> #open, put the former md5 value in a list, and remove the '\n' char<br></td></tr
><tr
id=sl_svn22_110
onmouseover="gutterOver(110)"
><td class="source"> dbfile=open(self.dbpath,'r')<br></td></tr
><tr
id=sl_svn22_111
onmouseover="gutterOver(111)"
><td class="source"> reg=re.compile("(\w*)\s*([^\s]*)\n")<br></td></tr
><tr
id=sl_svn22_112
onmouseover="gutterOver(112)"
><td class="source"> db=dbfile.readlines()<br></td></tr
><tr
id=sl_svn22_113
onmouseover="gutterOver(113)"
><td class="source"> dbfile.close()<br></td></tr
><tr
id=sl_svn22_114
onmouseover="gutterOver(114)"
><td class="source"> db = map(lambda x: x.strip(),db) <br></td></tr
><tr
id=sl_svn22_115
onmouseover="gutterOver(115)"
><td class="source"> #we look for each file of the config file<br></td></tr
><tr
id=sl_svn22_116
onmouseover="gutterOver(116)"
><td class="source"> for i in self.fileList:<br></td></tr
><tr
id=sl_svn22_117
onmouseover="gutterOver(117)"
><td class="source"> try:<br></td></tr
><tr
id=sl_svn22_118
onmouseover="gutterOver(118)"
><td class="source"> #info=os.stat(i) #brute information(same as ls)<br></td></tr
><tr
id=sl_svn22_119
onmouseover="gutterOver(119)"
><td class="source"> #if i is a directory then we compute the md5 of the ls -lia i string<br></td></tr
><tr
id=sl_svn22_120
onmouseover="gutterOver(120)"
><td class="source"> #if it's a file then we compute directly his md5<br></td></tr
><tr
id=sl_svn22_121
onmouseover="gutterOver(121)"
><td class="source"> if os.path.isdir(i):<br></td></tr
><tr
id=sl_svn22_122
onmouseover="gutterOver(122)"
><td class="source"> ls = subprocess.Popen(<br></td></tr
><tr
id=sl_svn22_123
onmouseover="gutterOver(123)"
><td class="source"> ["ls", i],<br></td></tr
><tr
id=sl_svn22_124
onmouseover="gutterOver(124)"
><td class="source"> stdout = subprocess.PIPE,<br></td></tr
><tr
id=sl_svn22_125
onmouseover="gutterOver(125)"
><td class="source"> stderr = subprocess.PIPE<br></td></tr
><tr
id=sl_svn22_126
onmouseover="gutterOver(126)"
><td class="source"> )<br></td></tr
><tr
id=sl_svn22_127
onmouseover="gutterOver(127)"
><td class="source"> out , error = ls.communicate()<br></td></tr
><tr
id=sl_svn22_128
onmouseover="gutterOver(128)"
><td class="source"> m=hashlib.md5(out)<br></td></tr
><tr
id=sl_svn22_129
onmouseover="gutterOver(129)"
><td class="source"> hashTmp=m.hexdigest()+" "+i<br></td></tr
><tr
id=sl_svn22_130
onmouseover="gutterOver(130)"
><td class="source"> else:<br></td></tr
><tr
id=sl_svn22_131
onmouseover="gutterOver(131)"
><td class="source"> md5sum = subprocess.Popen(<br></td></tr
><tr
id=sl_svn22_132
onmouseover="gutterOver(132)"
><td class="source"> ["md5sum", i],<br></td></tr
><tr
id=sl_svn22_133
onmouseover="gutterOver(133)"
><td class="source"> stdout = subprocess.PIPE,<br></td></tr
><tr
id=sl_svn22_134
onmouseover="gutterOver(134)"
><td class="source"> stderr = subprocess.PIPE<br></td></tr
><tr
id=sl_svn22_135
onmouseover="gutterOver(135)"
><td class="source"> )<br></td></tr
><tr
id=sl_svn22_136
onmouseover="gutterOver(136)"
><td class="source"> hashTmp, error = md5sum.communicate()<br></td></tr
><tr
id=sl_svn22_137
onmouseover="gutterOver(137)"
><td class="source"> hashTmp=hashTmp[:-1]<br></td></tr
><tr
id=sl_svn22_138
onmouseover="gutterOver(138)"
><td class="source"> #if the current md5 value and the former one are different then the files have been modified <br></td></tr
><tr
id=sl_svn22_139
onmouseover="gutterOver(139)"
><td class="source"> #so we put it in the report and update the md5 value to the new one<br></td></tr
><tr
id=sl_svn22_140
onmouseover="gutterOver(140)"
><td class="source"> if not hashTmp in db and not i == self.dbpath :<br></td></tr
><tr
id=sl_svn22_141
onmouseover="gutterOver(141)"
><td class="source"> #Update of the database<br></td></tr
><tr
id=sl_svn22_142
onmouseover="gutterOver(142)"
><td class="source"> for n in db:<br></td></tr
><tr
id=sl_svn22_143
onmouseover="gutterOver(143)"
><td class="source"> if i in n:<br></td></tr
><tr
id=sl_svn22_144
onmouseover="gutterOver(144)"
><td class="source"> db.remove(n)<br></td></tr
><tr
id=sl_svn22_145
onmouseover="gutterOver(145)"
><td class="source"> db.append(hashTmp)<br></td></tr
><tr
id=sl_svn22_146
onmouseover="gutterOver(146)"
><td class="source"> break<br></td></tr
><tr
id=sl_svn22_147
onmouseover="gutterOver(147)"
><td class="source"> dbmodified=True<br></td></tr
><tr
id=sl_svn22_148
onmouseover="gutterOver(148)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_149
onmouseover="gutterOver(149)"
><td class="source"> #Additionnal information for report<br></td></tr
><tr
id=sl_svn22_150
onmouseover="gutterOver(150)"
><td class="source"> #We join the ls -lia of the current file or directory that have been modified<br></td></tr
><tr
id=sl_svn22_151
onmouseover="gutterOver(151)"
><td class="source"> self.report+="File %s has been modificated\n"%i<br></td></tr
><tr
id=sl_svn22_152
onmouseover="gutterOver(152)"
><td class="source"> out, err = subprocess.Popen(<br></td></tr
><tr
id=sl_svn22_153
onmouseover="gutterOver(153)"
><td class="source"> ["ls", "-lia", i],<br></td></tr
><tr
id=sl_svn22_154
onmouseover="gutterOver(154)"
><td class="source"> stdout = subprocess.PIPE,<br></td></tr
><tr
id=sl_svn22_155
onmouseover="gutterOver(155)"
><td class="source"> stderr = subprocess.PIPE<br></td></tr
><tr
id=sl_svn22_156
onmouseover="gutterOver(156)"
><td class="source"> ).communicate()<br></td></tr
><tr
id=sl_svn22_157
onmouseover="gutterOver(157)"
><td class="source"> self.report+=out+"\n"<br></td></tr
><tr
id=sl_svn22_158
onmouseover="gutterOver(158)"
><td class="source"> except:<br></td></tr
><tr
id=sl_svn22_159
onmouseover="gutterOver(159)"
><td class="source"> self.report+= "The %s file is missing\n"%i<br></td></tr
><tr
id=sl_svn22_160
onmouseover="gutterOver(160)"
><td class="source"> if dbmodified:<br></td></tr
><tr
id=sl_svn22_161
onmouseover="gutterOver(161)"
><td class="source"> #if there was a modification of one of the files then we update the db file for the next call<br></td></tr
><tr
id=sl_svn22_162
onmouseover="gutterOver(162)"
><td class="source"> dbfile= open(self.dbpath,'w')<br></td></tr
><tr
id=sl_svn22_163
onmouseover="gutterOver(163)"
><td class="source"> dbfile.write("\n".join(db))<br></td></tr
><tr
id=sl_svn22_164
onmouseover="gutterOver(164)"
><td class="source"> self.report+="The database has been updated"<br></td></tr
><tr
id=sl_svn22_165
onmouseover="gutterOver(165)"
><td class="source"> dbfile.close()<br></td></tr
><tr
id=sl_svn22_166
onmouseover="gutterOver(166)"
><td class="source"> #we check if we need to send the report<br></td></tr
><tr
id=sl_svn22_167
onmouseover="gutterOver(167)"
><td class="source"> self.sendMail()<br></td></tr
><tr
id=sl_svn22_168
onmouseover="gutterOver(168)"
><td class="source"><br></td></tr
><tr
id=sl_svn22_169
onmouseover="gutterOver(169)"
><td class="source">def main():<br></td></tr
><tr
id=sl_svn22_170
onmouseover="gutterOver(170)"
><td class="source"> try:<br></td></tr
><tr
id=sl_svn22_171
onmouseover="gutterOver(171)"
><td class="source"> cfgfile=open('ids.cfg','r')<br></td></tr
><tr
id=sl_svn22_172
onmouseover="gutterOver(172)"
><td class="source"> except :<br></td></tr
><tr
id=sl_svn22_173
onmouseover="gutterOver(173)"
><td class="source"> #raise Exception('config file not found\nIds abort\n')<br></td></tr
><tr
id=sl_svn22_174
onmouseover="gutterOver(174)"
><td class="source"> print '\tconfiguration file not found\n\tIds script aborted\n'<br></td></tr
><tr
id=sl_svn22_175
onmouseover="gutterOver(175)"
><td class="source"> return <br></td></tr
><tr
id=sl_svn22_176
onmouseover="gutterOver(176)"
><td class="source"> mail=""<br></td></tr
><tr
id=sl_svn22_177
onmouseover="gutterOver(177)"
><td class="source"> rpm=False<br></td></tr
><tr
id=sl_svn22_178
onmouseover="gutterOver(178)"
><td class="source"> dbpath=""<br></td></tr
><tr
id=sl_svn22_179
onmouseover="gutterOver(179)"
><td class="source"> file=False<br></td></tr
><tr
id=sl_svn22_180
onmouseover="gutterOver(180)"
><td class="source"> fileList=[]<br></td></tr
><tr
id=sl_svn22_181
onmouseover="gutterOver(181)"
><td class="source"> #Processing the configuration file, and looking for the specials patterns<br></td></tr
><tr
id=sl_svn22_182
onmouseover="gutterOver(182)"
><td class="source"> for line in cfgfile:<br></td></tr
><tr
id=sl_svn22_183
onmouseover="gutterOver(183)"
><td class="source"> mailPat = re.compile("(email|e-mail)\s*:\s*([^\s]*)",re.IGNORECASE)<br></td></tr
><tr
id=sl_svn22_184
onmouseover="gutterOver(184)"
><td class="source"> pathPat = re.compile("(db|path)\s*:\s*([^\s]*)",re.IGNORECASE)<br></td></tr
><tr
id=sl_svn22_185
onmouseover="gutterOver(185)"
><td class="source"> lPat = re.compile("#.*list of files{0,1}.*#",re.IGNORECASE) <br></td></tr
><tr
id=sl_svn22_186
onmouseover="gutterOver(186)"
><td class="source"> if file and line != '\n' and not line[0] == '#' :<br></td></tr
><tr
id=sl_svn22_187
onmouseover="gutterOver(187)"
><td class="source"> fileList.append(line[:-1])<br></td></tr
><tr
id=sl_svn22_188
onmouseover="gutterOver(188)"
><td class="source"> elif mailPat.match(line):<br></td></tr
><tr
id=sl_svn22_189
onmouseover="gutterOver(189)"
><td class="source"> mail = mailPat.match(line).group(2)<br></td></tr
><tr
id=sl_svn22_190
onmouseover="gutterOver(190)"
><td class="source"> elif pathPat.match(line):<br></td></tr
><tr
id=sl_svn22_191
onmouseover="gutterOver(191)"
><td class="source"> dbpath=pathPat.match(line).group(2)<br></td></tr
><tr
id=sl_svn22_192
onmouseover="gutterOver(192)"
><td class="source"> elif re.match("option\s*:\s*rpm",line,re.IGNORECASE):<br></td></tr
><tr
id=sl_svn22_193
onmouseover="gutterOver(193)"
><td class="source"> rpm=True<br></td></tr
><tr
id=sl_svn22_194
onmouseover="gutterOver(194)"
><td class="source"> elif lPat.match(line):<br></td></tr
><tr
id=sl_svn22_195
onmouseover="gutterOver(195)"
><td class="source"> file=True <br></td></tr
><tr
id=sl_svn22_196
onmouseover="gutterOver(196)"
><td class="source"> else:<br></td></tr
><tr
id=sl_svn22_197
onmouseover="gutterOver(197)"
><td class="source"> #Comment or unknown option<br></td></tr
><tr
id=sl_svn22_198
onmouseover="gutterOver(198)"
><td class="source"> pass<br></td></tr
><tr
id=sl_svn22_199
onmouseover="gutterOver(199)"
><td class="source"> if rpm:<br></td></tr
><tr
id=sl_svn22_200
onmouseover="gutterOver(200)"
><td class="source"> RpmAnalyser(mail,dbpath,fileList)<br></td></tr
><tr
id=sl_svn22_201
onmouseover="gutterOver(201)"
><td class="source"> else:<br></td></tr
><tr
id=sl_svn22_202
onmouseover="gutterOver(202)"
><td class="source"> MdAnalyser(mail,dbpath,fileList)<br></td></tr
><tr
id=sl_svn22_203
onmouseover="gutterOver(203)"
><td class="source"> <br></td></tr
><tr
id=sl_svn22_204
onmouseover="gutterOver(204)"
><td class="source"> <br></td></tr
><tr
id=sl_svn22_205
onmouseover="gutterOver(205)"
><td class="source">if __name__ == "__main__":<br></td></tr
><tr
id=sl_svn22_206
onmouseover="gutterOver(206)"
><td class="source"> main()<br></td></tr
></table></pre>
<pre><table width="100%"><tr class="cursor_stop cursor_hidden"><td></td></tr></table></pre>
</td>
</tr></table>
<script type="text/javascript">
var lineNumUnderMouse = -1;
function gutterOver(num) {
gutterOut();
var newTR = document.getElementById('gr_svn22_' + num);
if (newTR) {
newTR.className = 'undermouse';
}
lineNumUnderMouse = num;
}
function gutterOut() {
if (lineNumUnderMouse != -1) {
var oldTR = document.getElementById(
'gr_svn22_' + lineNumUnderMouse);
if (oldTR) {
oldTR.className = '';
}
lineNumUnderMouse = -1;
}
}
var numsGenState = {table_base_id: 'nums_table_'};
var srcGenState = {table_base_id: 'src_table_'};
var alignerRunning = false;
var startOver = false;
function setLineNumberHeights() {
if (alignerRunning) {
startOver = true;
return;
}
numsGenState.chunk_id = 0;
numsGenState.table = document.getElementById('nums_table_0');
numsGenState.row_num = 0;
if (!numsGenState.table) {
return; // Silently exit if no file is present.
}
srcGenState.chunk_id = 0;
srcGenState.table = document.getElementById('src_table_0');
srcGenState.row_num = 0;
alignerRunning = true;
continueToSetLineNumberHeights();
}
function rowGenerator(genState) {
if (genState.row_num < genState.table.rows.length) {
var currentRow = genState.table.rows[genState.row_num];
genState.row_num++;
return currentRow;
}
var newTable = document.getElementById(
genState.table_base_id + (genState.chunk_id + 1));
if (newTable) {
genState.chunk_id++;
genState.row_num = 0;
genState.table = newTable;
return genState.table.rows[0];
}
return null;
}
var MAX_ROWS_PER_PASS = 1000;
function continueToSetLineNumberHeights() {
var rowsInThisPass = 0;
var numRow = 1;
var srcRow = 1;
while (numRow && srcRow && rowsInThisPass < MAX_ROWS_PER_PASS) {
numRow = rowGenerator(numsGenState);
srcRow = rowGenerator(srcGenState);
rowsInThisPass++;
if (numRow && srcRow) {
if (numRow.offsetHeight != srcRow.offsetHeight) {
numRow.firstChild.style.height = srcRow.offsetHeight + 'px';
}
}
}
if (rowsInThisPass >= MAX_ROWS_PER_PASS) {
setTimeout(continueToSetLineNumberHeights, 10);
} else {
alignerRunning = false;
if (startOver) {
startOver = false;
setTimeout(setLineNumberHeights, 500);
}
}
}
function initLineNumberHeights() {
// Do 2 complete passes, because there can be races
// between this code and prettify.
startOver = true;
setTimeout(setLineNumberHeights, 250);
window.onresize = setLineNumberHeights;
}
initLineNumberHeights();
</script>
<div id="log">
<div style="text-align:right">
<a class="ifCollapse" href="#" onclick="_toggleMeta(this); return false">Show details</a>
<a class="ifExpand" href="#" onclick="_toggleMeta(this); return false">Hide details</a>
</div>
<div class="ifExpand">
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="changelog">
<p>Change log</p>
<div>
<a href="/p/5a-bucarest-vallee-cedric/source/detail?spec=svn22&r=21">r21</a>
by V4LLEE.CEDRIC@gmail.com
on Today (25 minutes ago)
<a href="/p/5a-bucarest-vallee-cedric/source/diff?spec=svn22&r=21&format=side&path=/trunk/nss/homework/exo2/ids.py&old_path=/trunk/nss/homework/exo2/ids.py&old=20">Diff</a>
</div>
<pre>ok commentaire exo2</pre>
</div>
<script type="text/javascript">
var detail_url = '/p/5a-bucarest-vallee-cedric/source/detail?r=21&spec=svn22';
var publish_url = '/p/5a-bucarest-vallee-cedric/source/detail?r=21&spec=svn22#publish';
// describe the paths of this revision in javascript.
var changed_paths = [];
var changed_urls = [];
changed_paths.push('/trunk/nss/homework/exo1');
changed_urls.push('/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo1?r\x3d21\x26spec\x3dsvn22');
changed_paths.push('/trunk/nss/homework/exo1/scanner.py');
changed_urls.push('/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo1/scanner.py?r\x3d21\x26spec\x3dsvn22');
changed_paths.push('/trunk/nss/homework/exo2/db.md5');
changed_urls.push('/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/db.md5?r\x3d21\x26spec\x3dsvn22');
changed_paths.push('/trunk/nss/homework/exo2/ids.cfg');
changed_urls.push('/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/ids.cfg?r\x3d21\x26spec\x3dsvn22');
changed_paths.push('/trunk/nss/homework/exo2/ids.py');
changed_urls.push('/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/ids.py?r\x3d21\x26spec\x3dsvn22');
var selected_path = '/trunk/nss/homework/exo2/ids.py';
function getCurrentPageIndex() {
for (var i = 0; i < changed_paths.length; i++) {
if (selected_path == changed_paths[i]) {
return i;
}
}
}
function getNextPage() {
var i = getCurrentPageIndex();
if (i < changed_paths.length - 1) {
return changed_urls[i + 1];
}
return null;
}
function getPreviousPage() {
var i = getCurrentPageIndex();
if (i > 0) {
return changed_urls[i - 1];
}
return null;
}
function gotoNextPage() {
var page = getNextPage();
if (!page) {
page = detail_url;
}
window.location = page;
}
function gotoPreviousPage() {
var page = getPreviousPage();
if (!page) {
page = detail_url;
}
window.location = page;
}
function gotoDetailPage() {
window.location = detail_url;
}
function gotoPublishPage() {
window.location = publish_url;
}
</script>
<style type="text/css">
#review_nav {
border-top: 3px solid white;
padding-top: 6px;
margin-top: 1em;
}
#review_nav td {
vertical-align: middle;
}
#review_nav select {
margin: .5em 0;
}
</style>
<div id="review_nav">
<table><tr><td>Go to: </td><td>
<select name="files_in_rev" onchange="window.location=this.value">
<option value="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo1?r=21&spec=svn22"
>/trunk/nss/homework/exo1</option>
<option value="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo1/scanner.py?r=21&spec=svn22"
>/trunk/nss/homework/exo1/scanner.py</option>
<option value="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/db.md5?r=21&spec=svn22"
>/trunk/nss/homework/exo2/db.md5</option>
<option value="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/ids.cfg?r=21&spec=svn22"
>/trunk/nss/homework/exo2/ids.cfg</option>
<option value="/p/5a-bucarest-vallee-cedric/source/browse/trunk/nss/homework/exo2/ids.py?r=21&spec=svn22"
selected="selected"
>/trunk/nss/homework/exo2/ids.py</option>
</select>
</td></tr></table>
<div id="review_instr" class="closed">
<a class="ifOpened" href="/p/5a-bucarest-vallee-cedric/source/detail?r=21&spec=svn22#publish">Publish your comments</a>
<div class="ifClosed">Double click a line to add a comment</div>
</div>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="older_bubble">
<p>Older revisions</p>
<div class="closed" style="margin-bottom:3px;" >
<img class="ifClosed" onclick="_toggleHidden(this)" src="http://www.gstatic.com/codesite/ph/images/plus.gif" >
<img class="ifOpened" onclick="_toggleHidden(this)" src="http://www.gstatic.com/codesite/ph/images/minus.gif" >
<a href="/p/5a-bucarest-vallee-cedric/source/detail?spec=svn22&r=20">r20</a>
by V4LLEE.CEDRIC@gmail.com
on Today (79 minutes ago)
<a href="/p/5a-bucarest-vallee-cedric/source/diff?spec=svn22&r=20&format=side&path=/trunk/nss/homework/exo2/ids.py&old_path=/trunk/nss/homework/exo2/ids.py&old=19">Diff</a>
<br>
<pre class="ifOpened">ok gestion md5</pre>
</div>
<div class="closed" style="margin-bottom:3px;" >
<img class="ifClosed" onclick="_toggleHidden(this)" src="http://www.gstatic.com/codesite/ph/images/plus.gif" >
<img class="ifOpened" onclick="_toggleHidden(this)" src="http://www.gstatic.com/codesite/ph/images/minus.gif" >
<a href="/p/5a-bucarest-vallee-cedric/source/detail?spec=svn22&r=19">r19</a>
by V4LLEE.CEDRIC@gmail.com
on Today (8 hours ago)
<a href="/p/5a-bucarest-vallee-cedric/source/diff?spec=svn22&r=19&format=side&path=/trunk/nss/homework/exo2/ids.py&old_path=/trunk/nss/homework/exo2/ids.py&old=18">Diff</a>
<br>
<pre class="ifOpened">yes yes yes</pre>
</div>
<div class="closed" style="margin-bottom:3px;" >
<img class="ifClosed" onclick="_toggleHidden(this)" src="http://www.gstatic.com/codesite/ph/images/plus.gif" >
<img class="ifOpened" onclick="_toggleHidden(this)" src="http://www.gstatic.com/codesite/ph/images/minus.gif" >
<a href="/p/5a-bucarest-vallee-cedric/source/detail?spec=svn22&r=18">r18</a>
by V4LLEE.CEDRIC@gmail.com
on Today (8 hours ago)
<a href="/p/5a-bucarest-vallee-cedric/source/diff?spec=svn22&r=18&format=side&path=/trunk/nss/homework/exo2/ids.py&old_path=/trunk/nss/homework/exo2/ids.py&old=">Diff</a>
<br>
<pre class="ifOpened">avance doucement</pre>
</div>
<a href="/p/5a-bucarest-vallee-cedric/source/list?path=/trunk/nss/homework/exo2/ids.py&start=21">All revisions of this file</a>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
<div class="pmeta_bubble_bg" style="border:1px solid white">
<div class="round4"></div>
<div class="round2"></div>
<div class="round1"></div>
<div class="box-inner">
<div id="fileinfo_bubble">
<p>File info</p>
<div>Size: 6068 bytes,
206 lines</div>
<div><a href="//5a-bucarest-vallee-cedric.googlecode.com/svn/trunk/nss/homework/exo2/ids.py">View raw file</a></div>
</div>
</div>
<div class="round1"></div>
<div class="round2"></div>
<div class="round4"></div>
</div>
</div>
</div>
</div>
</div>
</div>
<script src="http://www.gstatic.com/codesite/ph/1847340689237817661/js/prettify/prettify.js"></script>
<script type="text/javascript">prettyPrint();</script>
<script src="http://www.gstatic.com/codesite/ph/1847340689237817661/js/source_file_scripts.js"></script>
<script type="text/javascript" src="https://kibbles.googlecode.com/files/kibbles-1.3.3.comp.js"></script>
<script type="text/javascript">
var lastStop = null;
var initialized = false;
function updateCursor(next, prev) {
if (prev && prev.element) {
prev.element.className = 'cursor_stop cursor_hidden';
}
if (next && next.element) {
next.element.className = 'cursor_stop cursor';
lastStop = next.index;
}
}
function pubRevealed(data) {
updateCursorForCell(data.cellId, 'cursor_stop cursor_hidden');
if (initialized) {
reloadCursors();
}
}
function draftRevealed(data) {
updateCursorForCell(data.cellId, 'cursor_stop cursor_hidden');
if (initialized) {
reloadCursors();
}
}
function draftDestroyed(data) {
updateCursorForCell(data.cellId, 'nocursor');
if (initialized) {
reloadCursors();
}
}
function reloadCursors() {
kibbles.skipper.reset();
loadCursors();
if (lastStop != null) {
kibbles.skipper.setCurrentStop(lastStop);
}
}
// possibly the simplest way to insert any newly added comments
// is to update the class of the corresponding cursor row,
// then refresh the entire list of rows.
function updateCursorForCell(cellId, className) {
var cell = document.getElementById(cellId);
// we have to go two rows back to find the cursor location
var row = getPreviousElement(cell.parentNode);
row.className = className;
}
// returns the previous element, ignores text nodes.
function getPreviousElement(e) {
var element = e.previousSibling;
if (element.nodeType == 3) {
element = element.previousSibling;
}
if (element && element.tagName) {
return element;
}
}
function loadCursors() {
// register our elements with skipper
var elements = CR_getElements('*', 'cursor_stop');
var len = elements.length;
for (var i = 0; i < len; i++) {
var element = elements[i];
element.className = 'cursor_stop cursor_hidden';
kibbles.skipper.append(element);
}
}
function toggleComments() {
CR_toggleCommentDisplay();
reloadCursors();
}
function keysOnLoadHandler() {
// setup skipper
kibbles.skipper.addStopListener(
kibbles.skipper.LISTENER_TYPE.PRE, updateCursor);
// Set the 'offset' option to return the middle of the client area
// an option can be a static value, or a callback
kibbles.skipper.setOption('padding_top', 50);
// Set the 'offset' option to return the middle of the client area
// an option can be a static value, or a callback
kibbles.skipper.setOption('padding_bottom', 100);
// Register our keys
kibbles.skipper.addFwdKey("n");
kibbles.skipper.addRevKey("p");
kibbles.keys.addKeyPressListener(
'u', function() { window.location = detail_url; });
kibbles.keys.addKeyPressListener(
'r', function() { window.location = detail_url + '#publish'; });
kibbles.keys.addKeyPressListener('j', gotoNextPage);
kibbles.keys.addKeyPressListener('k', gotoPreviousPage);
kibbles.keys.addKeyPressListener('h', toggleComments);
}
</script>
<script src="http://www.gstatic.com/codesite/ph/1847340689237817661/js/code_review_scripts.js"></script>
<script type="text/javascript">
function showPublishInstructions() {
var element = document.getElementById('review_instr');
if (element) {
element.className = 'opened';
}
}
var codereviews;
function revsOnLoadHandler() {
// register our source container with the commenting code
var paths = {'svn22': '/trunk/nss/homework/exo2/ids.py'}
codereviews = CR_controller.setup(
{"profileUrl":["/u/114866778344639499534/"],"token":"O3xiHWt3_Jm_qZ8IPZfxTV1QHMQ:1324516583409","assetHostPath":"http://www.gstatic.com/codesite/ph","domainName":null,"assetVersionPath":"http://www.gstatic.com/codesite/ph/1847340689237817661","projectHomeUrl":"/p/5a-bucarest-vallee-cedric","relativeBaseUrl":"","projectName":"5a-bucarest-vallee-cedric","loggedInUserEmail":"V4LLEE.CEDRIC@gmail.com"}, '', 'svn22', paths,
CR_BrowseIntegrationFactory);
// register our source container with the commenting code
// in this case we're registering the container and the revison
// associated with the contianer which may be the primary revision
// or may be a previous revision against which the primary revision
// of the file is being compared.
codereviews.registerSourceContainer(document.getElementById('lines'), 'svn22');
codereviews.registerActivityListener(CR_ActivityType.REVEAL_DRAFT_PLATE, showPublishInstructions);
codereviews.registerActivityListener(CR_ActivityType.REVEAL_PUB_PLATE, pubRevealed);
codereviews.registerActivityListener(CR_ActivityType.REVEAL_DRAFT_PLATE, draftRevealed);
codereviews.registerActivityListener(CR_ActivityType.DISCARD_DRAFT_COMMENT, draftDestroyed);
var initialized = true;
reloadCursors();
}
window.onload = function() {keysOnLoadHandler(); revsOnLoadHandler();};
</script>
<script type="text/javascript" src="http://www.gstatic.com/codesite/ph/1847340689237817661/js/dit_scripts.js"></script>
<script type="text/javascript" src="http://www.gstatic.com/codesite/ph/1847340689237817661/js/ph_core.js"></script>
<script type="text/javascript" src="/js/codesite_product_dictionary_ph.pack.04102009.js"></script>
</div>
<div id="footer" dir="ltr">
<div class="text">
©2011 Google -
<a href="/projecthosting/terms.html">Terms</a> -
<a href="http://www.google.com/privacy.html">Privacy</a> -
<a href="/p/support/">Project Hosting Help</a>
</div>
</div>
<div class="hostedBy" style="margin-top: -20px;">
<span style="vertical-align: top;">Powered by <a href="http://code.google.com/projecthosting/">Google Project Hosting</a></span>
</div>
</body>
</html>
| Python |
#####################################################
# Python Port Scanner #
#####################################################
# Vallee Cedric #
# 17/12/2011 #
# Network and Systems Security #
#####################################################
# In bash nc -vvz 192.168.0.1 79-81 2>&1 | grep open
import socket, subprocess
import sys
import threading
from optparse import OptionParser
# Upper bound on simultaneously active threads; the port scan loop only
# launches a new Scanner thread while the live count is below this cap.
MAX_THREADS = 50
#ipRange generator from startAddr to endAddr
# startAddr : must be a standard ip : example 127.0.0.1
# endAddr : just need the last digit : 2.45 means from 127.0.0.1 to 127.0.2.45
def ipAddrRange(startAddr, endAddr):
    """Yield dotted-quad IP addresses from startAddr through endAddr, inclusive.

    startAddr: full dotted quad, e.g. "127.0.0.1".
    endAddr: may be abbreviated from the left; omitted leading octets are
        copied from startAddr (e.g. "2.45" with start "127.0.0.1" means
        the range ends at "127.0.2.45").
    """
    def incrAddr(addrList):
        # Increment the last octet, propagating carries leftwards.
        addrList[3] += 1
        for i in (3, 2, 1):
            if addrList[i] == 256:
                addrList[i] = 0
                addrList[i - 1] += 1
    def asString(addrList):
        return ".".join(map(str, addrList))
    # list() so that indexing and len() work on Python 3 as well as
    # Python 2 (map() returns a lazy iterator on Python 3).
    startAddrList = list(map(int, startAddr.split(".")))
    endAddrList = list(map(int, endAddr.split(".")))
    # Fill in omitted leading octets of the end address from the start address.
    if len(endAddrList) < 4:
        for i in range(4 - len(endAddrList)):
            endAddrList.insert(i, startAddrList[i])
    curAddrList = startAddrList[:]
    yield asString(curAddrList)
    # Walk octet by octet: inner loop advances (with carry) until the
    # current octet matches the end address, yielding each address passed.
    for i in range(4):
        while curAddrList[i] < endAddrList[i]:
            incrAddr(curAddrList)
            yield asString(curAddrList)
#Threaded port's scanner
class Scanner(threading.Thread):
    """One thread per (host, port) probe: prints the port if a TCP connect succeeds."""
    def __init__(self, host, port):
        threading.Thread.__init__(self)
        # target host and port
        self.host = host
        self.port = port
        # build up the socket obj
        self.sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def run(self):
        try:
            # connect to the given host:port; success means the port is open
            self.sd.connect((self.host, self.port))
            print("%s:%d OPEN" % (self.host, self.port))
        except socket.error:
            # connection refused / timed out: port is closed, stay silent
            # (narrowed from a bare except, which also swallowed
            # KeyboardInterrupt and SystemExit)
            pass
        finally:
            # always release the descriptor -- the original closed it only
            # on success, leaking one socket per closed port probed
            self.sd.close()
#General scanner
class Scan:
    # General scanner driver: parses the target spec, pings each candidate
    # host, and launches a threaded port scan against the ones that answer.
    def __init__(self, host,start,end) :
        """Parse the target spec and immediately run the whole scan.

        host: a plain host/IP, or a range spec such as
            "127.0.0.1-2.45:78-85" -- scan 127.0.0.1 through 127.0.2.45,
            ports 78 through 85.  A ":p" or ":p1-p2" suffix overrides
            start/end.
        start, end: default first and last port to probe.

        NOTE(review): all the work happens here at construction time
        (ping subprocesses, Scanner threads); the instance itself is
        never used by callers afterwards.
        """
        # start port and end port
        self.start, self.stop = start, end
        self.ipfin = ""
        # host resolution
        #Resolve the arguments like "127.0.0.1-2.45:78-85"
        #which means analyse any target from 127.0.0.1 to 127.0.2.45 from the 78 port to the 85
        if ':' in host:
            # explicit port (or port range) attached to the host part
            self.host = host.split(':')[0]
            self.ipfin = self.host
            if '-' in host.split(':')[1]:
                self.start= int(host.split(':')[1].split('-')[0])
                self.stop= int(host.split(':')[1].split('-')[1])
            else :
                # a single port: scan exactly that one
                self.start = int(host.split(':')[1])
                self.stop = int(host.split(':')[1])
        else :
            self.host = host
            self.ipfin = host
        scan = "Scan of " + self.host
        if '-' in self.host :
            # the host part itself is a range: "a.b.c.d-e.f" style
            self.ipfin= self.host.split('-')[1]
            self.host= self.host.split('-')[0]
            scan = "Scan from " + self.host + " to " + self.ipfin
        # NOTE(review): if start/end reach here as strings (e.g. unconverted
        # option defaults) this '%d' formatting raises TypeError -- confirm
        # callers always pass ints
        scan += "\nPorts '%d' to '%d'" %(self.start,self.stop)
        #beginning of the scan, target by target
        print scan
        for addr in ipAddrRange(self.host,self.ipfin):
            #We test if the target is alive by pinging it, icmp request
            ping = subprocess.Popen(
                ["ping", "-c", "1", addr],
                stdout = subprocess.PIPE,
                stderr = subprocess.PIPE
            )
            out, error = ping.communicate()
            #if the host is awake then we scan
            # NOTE(review): matching "1 received" is ping-version and
            # locale dependent -- confirm on the deployment platform
            if "1 received" in out:
                print "self scan lance %s"%addr
                self.scan(addr, self.start, self.stop)
            else:
                print "hostname '%s' unknown" % addr
    def scan(self, host, start, stop):
        """Probe ports start..stop (inclusive) on host, one Scanner thread per port."""
        self.port = start
        while self.port <= stop:
            # we start as much thread as possible
            # each one try one port
            # During the answer's delay, we can do other actions like trying an other port
            # NOTE(review): when the thread cap is reached this loop spins
            # without sleeping until a slot frees up (busy wait)
            if threading.activeCount() < MAX_THREADS:
                Scanner(host, self.port).start()
                self.port += 1
def main():
    """Parse options/arguments and launch the scan."""
    #we parse the options and arguments of the script
    parser = OptionParser(usage="usage: %prog [options] target",
                          version="%prog 1.0")
    # BUG FIX: optparse does NOT pass defaults through type="int"; the old
    # default="1"/"1024" left strings in options.pstart/pend, which made
    # Scan's "%d" formatting raise TypeError when no -s/-e was given.
    parser.add_option("-s", "--start", dest="pstart",type="int", help ="Starting value of the port scanning",default=1)
    parser.add_option("-e", "--end", dest="pend",type="int", help ="Ending value of the port scanning",default=1024)
    (options, args) = parser.parse_args()
    if len(args)==1 :
        Scan(args[0],options.pstart,options.pend)
    else:
        parser.error("wrong number of arguments")
if __name__ == "__main__":
    main()
| Python |
#####################################################
# Python Watch and Warn Files Modification script #
#####################################################
# Vallee Cedric #
# 20/12/2011 #
# Network and Systems Security #
#####################################################
import re
import subprocess
import os
import hashlib
import smtplib
from email.MIMEText import MIMEText
#General analyser class : manage the mail sending
class Analyser:
#Constructor
#mail : where the mail will be sent
#dbpath : db location
#fileList : List of files to analyse, ids.py and ids.cfg are automatically added
def __init__(self, mail, dbpath,fileList):
self.mail = mail
self.dbpath = dbpath
self.fileList = fileList
self.fileList.append("ids.cfg")
self.fileList.append("ids.py")
#report: string which store the mail which will be sent
self.report=""
self.run()
#send the report to the self.mail
def sendMail(self):
if self.report != "":
#header
self.report="\tAutomatic report from ids.py script\n"+ self.report
print self.report
fromaddr = 'cvallee.insa@gmail.com'
toaddrs = self.mail
msg = self.report
# Credentials for connecting on the google spam account
username = 'nss.automatic.message@gmail.com'
password = 'ThisIsMyPassword'
# The actual mail send
server = smtplib.SMTP('smtp.gmail.com:587')
server.starttls()
server.login(username,password)
server.sendmail(fromaddr, toaddrs, msg)
server.quit()
self.report=""
else:
print "No modification"
#the main method
def Run(self):
pass
#database constructor
def create(self):
pass
#RpmAnalyser which is not yet implemented
class RpmAnalyser(Analyser):
    # BUG FIX: these stubs were named Run()/Create(), but the base class's
    # __init__ invokes self.run() and the driver expects create() -- the
    # capitalised names were never called, so instantiating RpmAnalyser
    # raised AttributeError.
    def run(self):
        pass
    def create(self):
        pass
#Analyser using the md5 hashcode for matching files
class MdAnalyser(Analyser):
    """Integrity analyser matching files by md5 checksum.

    For a directory, the md5 of its listing is stored; for a plain file,
    the md5sum(1) output ("<hash>  <path>") is stored."""
    def create(self):
        """Build the baseline database at self.dbpath."""
        #open the self.dbpath file in writing mode
        db= open(self.dbpath,'w')
        for i in self.fileList:
            try:
                #info=os.stat(i) brute information(same as ls)
                #if i is a directory then we compute the md5 of the ls -lia i string
                #if it's a file then we compute directly his md5
                if os.path.isdir(i):
                    ls = subprocess.Popen(
                        ["ls", i],
                        stdout = subprocess.PIPE,
                        stderr = subprocess.PIPE
                    )
                    out , error = ls.communicate()
                    m=hashlib.md5(out)
                    hashTmp=m.hexdigest()+" "+i+"\n"
                else:
                    # md5sum already emits "<hash>  <path>\n"
                    md5sum = subprocess.Popen(
                        ["md5sum", i],
                        stdout = subprocess.PIPE,
                        stderr = subprocess.PIPE
                    )
                    hashTmp, error = md5sum.communicate()
                #write it in the db file
                db.write(hashTmp)
            except:
                self.report+= "The %s file is missing\n"%i
        #Report the creation
        self.report+="The database has been created"
        db.close()
    def run(self):
        """Compare current checksums against the database and report."""
        dbmodified=False
        if not os.path.exists(self.dbpath):
            #if the db file doesn't exist then we create it
            self.create()
        else:
            #open, put the former md5 value in a list, and remove the '\n' char
            dbfile=open(self.dbpath,'r')
            reg=re.compile("(\w*)\s*([^\s]*)\n")
            db=dbfile.readlines()
            dbfile.close()
            db = map(lambda x: x.strip(),db)
            #we look for each file of the config file
            for i in self.fileList:
                try:
                    #info=os.stat(i) #brute information(same as ls)
                    #if i is a directory then we compute the md5 of the ls -lia i string
                    #if it's a file then we compute directly his md5
                    if os.path.isdir(i):
                        ls = subprocess.Popen(
                            ["ls", i],
                            stdout = subprocess.PIPE,
                            stderr = subprocess.PIPE
                        )
                        out , error = ls.communicate()
                        m=hashlib.md5(out)
                        hashTmp=m.hexdigest()+" "+i
                    else:
                        md5sum = subprocess.Popen(
                            ["md5sum", i],
                            stdout = subprocess.PIPE,
                            stderr = subprocess.PIPE
                        )
                        hashTmp, error = md5sum.communicate()
                        # drop the trailing newline to match the stripped db lines
                        hashTmp=hashTmp[:-1]
                    #if the current md5 value and the former one are different then the files have been modified
                    #so we put it in the report and update the md5 value to the new one
                    if not hashTmp in db and not i == self.dbpath :
                        #Update of the database
                        for n in db:
                            if i in n:
                                db.remove(n)
                                db.append(hashTmp)
                                break
                        dbmodified=True
                        #Additionnal information for report
                        #We join the ls -lia of the current file or directory that have been modified
                        self.report+="File %s has been modificated\n"%i
                        out, err = subprocess.Popen(
                            ["ls", "-lia", i],
                            stdout = subprocess.PIPE,
                            stderr = subprocess.PIPE
                        ).communicate()
                        self.report+=out+"\n"
                except:
                    self.report+= "The %s file is missing\n"%i
            if dbmodified:
                #if there was a modification of one of the files then we update the db file for the next call
                dbfile= open(self.dbpath,'w')
                dbfile.write("\n".join(db))
                self.report+="The database has been updated"
                dbfile.close()
        #we check if we need to send the report
        self.sendMail()
def main():
    """Read ids.cfg and launch the configured analyser."""
    try:
        cfgfile=open('ids.cfg','r')
    except :
        #raise Exception('config file not found\nIds abort\n')
        print '\tconfiguration file not found\n\tIds script aborted\n'
        return
    mail=""
    rpm=False
    dbpath=""
    # 'file' flips to True once the "list of files" section header is seen;
    # every subsequent non-comment line is then a path to watch
    file=False
    fileList=[]
    #Processing the configuration file, and looking for the specials patterns
    for line in cfgfile:
        mailPat = re.compile("(email|e-mail)\s*:\s*([^\s]*)",re.IGNORECASE)
        pathPat = re.compile("(db|path)\s*:\s*([^\s]*)",re.IGNORECASE)
        lPat = re.compile("#.*list of files{0,1}.*#",re.IGNORECASE)
        if file and line != '\n' and not line[0] == '#' :
            # strip the trailing newline before storing the path
            fileList.append(line[:-1])
        elif mailPat.match(line):
            mail = mailPat.match(line).group(2)
        elif pathPat.match(line):
            dbpath=pathPat.match(line).group(2)
        elif re.match("option\s*:\s*rpm",line,re.IGNORECASE):
            rpm=True
        elif lPat.match(line):
            file=True
        else:
            #Comment or unknown option
            pass
    if rpm:
        RpmAnalyser(mail,dbpath,fileList)
    else:
        MdAnalyser(mail,dbpath,fileList)
if __name__ == "__main__":
    main()
| Python |
#####################################################
# Python Port Scanner #
#####################################################
# Vallee Cedric #
# 17/12/2011 #
# Network and Systems Security #
#####################################################
# In bash nc -vvz 192.168.0.1 79-81 2>&1 | grep open
import socket, subprocess
import sys
import threading
from optparse import OptionParser
MAX_THREADS = 50
#ipRange generator from startAddr to endAddr
# startAddr : must be a standard ip : example 127.0.0.1
# endAddr : just need the last digit : 2.45 means from 127.0.0.1 to 127.0.2.45
def ipAddrRange(startAddr, endAddr):
    """Generate dotted-quad IP strings from startAddr to endAddr inclusive.

    startAddr must be a full address (e.g. "127.0.0.1"); endAddr may give
    only the trailing octets ("2.45" means up to 127.0.2.45), the missing
    leading octets being borrowed from startAddr.
    """
    def incrAddr(addrList):
        # increment the last octet, carrying into the higher ones
        addrList[3] += 1
        for i in (3,2,1):
            if addrList[i] == 256:
                addrList[i] = 0
                addrList[i-1] += 1
    def asString(addrList):
        return ".".join(map(str,addrList))
    # BUG FIX: map() must be materialized -- on Python 3 it returns an
    # iterator, which breaks the slice/insert below (no-op on Python 2).
    startAddrList = list(map(int,startAddr.split(".")))
    endAddrList = list(map(int,endAddr.split(".")))
    if len(endAddrList)<4:
        # complete a short endAddr with startAddr's leading octets
        for i in range(4-len(endAddrList)):
            endAddrList.insert(i,startAddrList[i])
    curAddrList = startAddrList[:]
    yield asString(curAddrList)
    # advance octet by octet until each one reaches its end value
    for i in range(4):
        while curAddrList[i] < endAddrList[i]:
            incrAddr(curAddrList)
            yield asString(curAddrList)
#Threaded port's scanner
class Scanner(threading.Thread):
    """One thread probing a single host:port with a TCP connect()."""
    def __init__(self, host, port):
        threading.Thread.__init__(self)
        # host and port
        self.host = host
        self.port = port
        # build up the socket obj
        self.sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def run(self):
        try:
            # connect to the given host:port
            self.sd.connect((self.host, self.port))
            print "%s:%d OPEN" % (self.host, self.port)
            self.sd.close()
        except:
            # any connect failure (refused/timeout/unreachable) is treated
            # as "closed"; deliberately silent so only open ports print
            #print "%s:%d Closed" % (self.host, self.port)
            pass
#General scanner
class Scan:
    """Parses the target spec, pings each host in range, scans live ones."""
    def __init__(self, host,start,end) :
        # start port and end port
        self.start, self.stop = start, end
        self.ipfin = ""
        # host resolution
        #Resolve the arguments like "127.0.0.1-2.45:78-85"
        #which means analyse any target from 127.0.0.1 to 127.0.2.45 from the 78 port to the 85
        if ':' in host:
            # ports given in the spec override the -s/-e options
            self.host = host.split(':')[0]
            self.ipfin = self.host
            if '-' in host.split(':')[1]:
                self.start= int(host.split(':')[1].split('-')[0])
                self.stop= int(host.split(':')[1].split('-')[1])
            else :
                self.start = int(host.split(':')[1])
                self.stop = int(host.split(':')[1])
        else :
            self.host = host
            self.ipfin = host
        scan = "Scan of " + self.host
        if '-' in self.host :
            # "A-B" form: scan the whole address range
            self.ipfin= self.host.split('-')[1]
            self.host= self.host.split('-')[0]
            scan = "Scan from " + self.host + " to " + self.ipfin
        scan += "\nPorts '%d' to '%d'" %(self.start,self.stop)
        #beginning of the scan, target by target
        print scan
        for addr in ipAddrRange(self.host,self.ipfin):
            #We test if the target is alive by pinging it, icmp request
            ping = subprocess.Popen(
                ["ping", "-c", "1", addr],
                stdout = subprocess.PIPE,
                stderr = subprocess.PIPE
            )
            out, error = ping.communicate()
            #if the host is awake then we scan
            # NOTE(review): matching "1 received" ties this to the GNU/Linux
            # ping output format -- confirm on other platforms
            if "1 received" in out:
                print "self scan lance %s"%addr
                self.scan(addr, self.start, self.stop)
            else:
                print "hostname '%s' unknown" % addr
    def scan(self, host, start, stop):
        """Launch one Scanner thread per port in [start, stop]."""
        self.port = start
        while self.port <= stop:
            # we start as much thread as possible
            # each one try one port
            # During the answer's delay, we can do other actions like trying an other port
            if threading.activeCount() < MAX_THREADS:
                Scanner(host, self.port).start()
                self.port += 1
def main():
    """Parse options/arguments and launch the scan."""
    #we parse the options and arguments of the script
    parser = OptionParser(usage="usage: %prog [options] target",
                          version="%prog 1.0")
    # BUG FIX: optparse does NOT pass defaults through type="int"; the old
    # default="1"/"1024" left strings in options.pstart/pend, which made
    # Scan's "%d" formatting raise TypeError when no -s/-e was given.
    parser.add_option("-s", "--start", dest="pstart",type="int", help ="Starting value of the port scanning",default=1)
    parser.add_option("-e", "--end", dest="pend",type="int", help ="Ending value of the port scanning",default=1024)
    (options, args) = parser.parse_args()
    if len(args)==1 :
        Scan(args[0],options.pstart,options.pend)
    else:
        parser.error("wrong number of arguments")
if __name__ == "__main__":
    main()
| Python |
# Python Port Scanner
# one thread by target
#In bash nc -vvz 192.168.0.1 79-81 2>&1 | grep open
import socket, subprocess
import sys
import threading
from optparse import OptionParser
MAX_THREADS = 50
def ipAddrRange(startAddr, endAddr):
    """Generate dotted-quad IP strings from startAddr to endAddr inclusive.

    startAddr must be a full address (e.g. "127.0.0.1"); endAddr may give
    only the trailing octets ("2.45" means up to 127.0.2.45), the missing
    leading octets being borrowed from startAddr.
    """
    def incrAddr(addrList):
        # increment the last octet, carrying into the higher ones
        addrList[3] += 1
        for i in (3,2,1):
            if addrList[i] == 256:
                addrList[i] = 0
                addrList[i-1] += 1
    def asString(addrList):
        return ".".join(map(str,addrList))
    # BUG FIX: map() must be materialized -- on Python 3 it returns an
    # iterator, which breaks the slice/insert below (no-op on Python 2).
    startAddrList = list(map(int,startAddr.split(".")))
    endAddrList = list(map(int,endAddr.split(".")))
    if len(endAddrList)<4:
        # complete a short endAddr with startAddr's leading octets
        for i in range(4-len(endAddrList)):
            endAddrList.insert(i,startAddrList[i])
    curAddrList = startAddrList[:]
    yield asString(curAddrList)
    # advance octet by octet until each one reaches its end value
    for i in range(4):
        while curAddrList[i] < endAddrList[i]:
            incrAddr(curAddrList)
            yield asString(curAddrList)
class Scanner(threading.Thread):
    """One thread probing a single host:port with a TCP connect()."""
    def __init__(self, host, port):
        threading.Thread.__init__(self)
        # host and port
        self.host = host
        self.port = port
        # build up the socket obj
        self.sd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def run(self):
        try:
            # connect to the given host:port
            self.sd.connect((self.host, self.port))
            print "%s:%d OPEN" % (self.host, self.port)
            self.sd.close()
        except:
            # any connect failure is treated as "closed"; stay silent so
            # only open ports are printed
            #print "%s:%d Closed" % (self.host, self.port)
            pass
class Scan:
    """Parses the target spec, pings each host in range, scans live ones.

    Spec syntax: "IP[-END][:PORT[-PORT]]", e.g. "127.0.0.1-2.45:78-85"."""
    def __init__(self, host,start,end) :
        # start port and end port
        self.start, self.stop = start, end
        self.ipfin = ""
        # host resolution
        if ':' in host:
            # ports given in the spec override the -s/-e options
            self.host = host.split(':')[0]
            self.ipfin = self.host
            if '-' in host.split(':')[1]:
                self.start= int(host.split(':')[1].split('-')[0])
                self.stop= int(host.split(':')[1].split('-')[1])
            else :
                self.start = int(host.split(':')[1])
                self.stop = int(host.split(':')[1])
        else :
            self.host = host
            self.ipfin = host
        scan = "Scan of " + self.host
        if '-' in self.host :
            # "A-B" form: scan the whole address range
            self.ipfin= self.host.split('-')[1]
            self.host= self.host.split('-')[0]
            scan = "Scan from " + self.host + " to " + self.ipfin
        scan += "\nPorts '%d' to '%d'" %(self.start,self.stop)
        print scan
        for addr in ipAddrRange(self.host,self.ipfin):
            # liveness check: one ICMP echo request per candidate host
            ping = subprocess.Popen(
                ["ping", "-c", "1", addr],
                stdout = subprocess.PIPE,
                stderr = subprocess.PIPE
            )
            out, error = ping.communicate()
            # NOTE(review): "1 received" matches GNU/Linux ping output only
            if "1 received" in out:
                print "self scan lance %s"%addr
                self.scan(addr, self.start, self.stop)
            else:
                print "hostname '%s' unknown" % addr
    def incrementeIp(self) :
        # NOTE(review): dead placeholder -- never called anywhere
        print "inc ip"
    def scan(self, host, start, stop):
        """Launch one Scanner thread per port, bounded by MAX_THREADS."""
        self.port = start
        while self.port <= stop:
            if threading.activeCount() < MAX_THREADS:
                Scanner(host, self.port).start()
                self.port += 1
def main():
    """Parse options/arguments and launch the scan."""
    parser = OptionParser(usage="usage: %prog [options] target",
                          version="%prog 1.0")
    # BUG FIX: optparse does NOT pass defaults through type="int"; the old
    # default="1"/"1024" left strings in options.pstart/pend, which made
    # Scan's "%d" formatting raise TypeError when no -s/-e was given.
    parser.add_option("-s", "--start", dest="pstart",type="int", help ="Starting value of the port scanning",default=1)
    parser.add_option("-e", "--end", dest="pend",type="int", help ="Ending value of the port scanning",default=1024)
    (options, args) = parser.parse_args()
    if len(args)==1 :
        Scan(args[0],options.pstart,options.pend)
    else:
        parser.error("wrong number of arguments")
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
#
# progressbar - Text progressbar library for python.
# Copyright (c) 2005 Nilton Volpato
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Text progressbar library for python.
This library provides a text mode progressbar. This is tipically used
to display the progress of a long running operation, providing a
visual clue that processing is underway.
The ProgressBar class manages the progress, and the format of the line
is given by a number of widgets. A widget is an object that may
display diferently depending on the state of the progress. There are
three types of widget:
- a string, which always shows itself;
- a ProgressBarWidget, which may return a diferent value every time
it's update method is called; and
- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
expands to fill the remaining width of the line.
The progressbar module is very easy to use, yet very powerful. And
automatically supports features like auto-resizing when available.
"""
__author__ = "Nilton Volpato"
__author_email__ = "first-name dot last-name @ gmail.com"
__date__ = "2006-05-07"
__version__ = "2.2"
# Changelog
#
# 2006-05-07: v2.2 fixed bug in windows
# 2005-12-04: v2.1 autodetect terminal width, added start method
# 2005-12-04: v2.0 everything is now a widget (wow!)
# 2005-12-03: v1.0 rewrite using widgets
# 2005-06-02: v0.5 rewrite
# 2004-??-??: v0.1 first version
import sys, time
from array import array
try:
from fcntl import ioctl
import termios
except ImportError:
pass
import signal
class ProgressBarWidget(object):
    """Base class for a fixed-width element of ProgressBar formatting.

    ProgressBar calls update() each time the bar is redrawn.  The width of
    the returned string may vary between calls, but drastic repeated
    changes render poorly.
    """
    def update(self, pbar):
        """Return the string representing this widget.

        *pbar* is the calling ProgressBar; read its attributes (currval,
        maxval, finished, seconds_elapsed, ...) to decide what to draw.
        Subclasses must override this method.
        """
        pass
class ProgressBarWidgetHFill(object):
    """Base class for a variable-width element of ProgressBar formatting.

    Works like TeX's \\hfill: it stretches to fill the line.  Several may
    appear on one line; they all receive the same width and together fill
    the remaining space.
    """
    def update(self, pbar, width):
        """Return the string representing this widget.

        *pbar* is the calling ProgressBar; *width* is the exact horizontal
        width the returned string must occupy.  Subclasses must override
        this method.
        """
        pass
class ETA(ProgressBarWidget):
    """Widget showing the estimated time remaining (total time when done)."""
    def format_time(self, seconds):
        # hh:mm:ss rendering of a duration
        return time.strftime('%H:%M:%S', time.gmtime(seconds))
    def update(self, pbar):
        if pbar.currval == 0:
            # nothing measured yet -- no estimate possible
            return 'ETA: --:--:--'
        if pbar.finished:
            return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
        done = pbar.seconds_elapsed
        # linear extrapolation from the work completed so far
        remaining = done * pbar.maxval / pbar.currval - done
        return 'ETA: %s' % self.format_time(remaining)
class FileTransferSpeed(ProgressBarWidget):
    "Widget for showing the transfer speed (useful for file transfers)."
    def __init__(self):
        self.fmt = '%6.2f %s'
        self.units = ['B','K','M','G','T','P']
    def update(self, pbar):
        # Guard against dividing by (almost) zero right after start-up.
        if pbar.seconds_elapsed < 2e-6:
            speed = 0.0
        else:
            speed = float(pbar.currval) / pbar.seconds_elapsed
        # scale down by thousands until the value fits the unit
        for unit in self.units:
            if speed < 1000:
                break
            speed /= 1000
        return self.fmt % (speed, unit + '/s')
class RotatingMarker(ProgressBarWidget):
    "A rotating marker for filling the bar of progress."
    def __init__(self, markers='|/-\\'):
        self.markers = markers
        # start at -1 so the first update() yields markers[0]
        self.curmark = -1
    def update(self, pbar):
        if pbar.finished:
            # settle on the first marker once done
            return self.markers[0]
        advanced = self.curmark + 1
        self.curmark = advanced % len(self.markers)
        return self.markers[self.curmark]
class Percentage(ProgressBarWidget):
    "Just the percentage done."
    def update(self, pbar):
        pct = pbar.percentage()
        return '%3d%%' % pct
class Bar(ProgressBarWidgetHFill):
    "The bar of progress. It will strech to fill the line."
    def __init__(self, marker='#', left='|', right='|'):
        # marker may be a plain string or a widget (e.g. RotatingMarker)
        self.marker = marker
        self.left = left
        self.right = right
    def _format_marker(self, pbar):
        # strings render as-is; anything else is a widget asked to render
        if isinstance(self.marker, (str, unicode)):
            return self.marker
        else:
            return self.marker.update(pbar)
    def update(self, pbar, width,string=""):
        """Render the bar at *width* columns; once finished, the optional
        *string* is centred between two half-width bars."""
        percent = pbar.percentage()
        cwidth = (width - len(self.left) - len(self.right))
        marked_width = int(percent * cwidth / 100)
        m = self._format_marker(pbar)
        if pbar.finished :
            # halve the width so the centred text fits between two bars
            cwidth = (cwidth - len(string))/2
            marked_width = int(percent * cwidth / 100)
            bar = (self.left + (m*(marked_width)).ljust(cwidth)+string + (m*(marked_width)).ljust(cwidth) + self.right)
        else :
            bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
        return bar
class MiddleTexteBar(ProgressBarWidgetHFill):
    "The bar of progress. It will strech to fill the line with text in middle."
    def __init__(self, marker='#', left='|', right='|'):
        self.marker = marker
        self.left = left
        self.right = right
        # text displayed in the middle of the bar once finished
        self.texte = ""
    def _format_marker(self, pbar):
        # strings render as-is; anything else is a widget asked to render
        if isinstance(self.marker, (str, unicode)):
            return self.marker
        else:
            return self.marker.update(pbar)
    def update(self, pbar, width):
        """Render the bar at *width* columns; once finished, self.texte is
        centred between two half-width bars."""
        percent = pbar.percentage()
        cwidth = (width - len(self.left) - len(self.right))
        marked_width = int(percent * cwidth / 100)
        m = self._format_marker(pbar)
        if pbar.finished :
            # BUG FIX: the original referenced an undefined name "string"
            # here (copied from Bar.update's optional parameter), raising
            # NameError on the finished path; the centred text is
            # self.texte.
            cwidth = (cwidth - len(self.texte))/2
            marked_width = int(percent * cwidth / 100)
            bar = (self.left + (m*(marked_width)).ljust(cwidth)+ self.texte + (m*(marked_width)).ljust(cwidth) + self.right)
        else :
            bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
        return bar
class ReverseBar(Bar):
    "The reverse bar of progress, or bar of regress. :)"
    def update(self, pbar, width):
        """Render like Bar, but the fill grows from the right edge."""
        pct = pbar.percentage()
        inner = width - len(self.left) - len(self.right)
        filled = int(pct * inner / 100)
        marker = self._format_marker(pbar)
        return self.left + (marker * filled).rjust(inner) + self.right
# Widget layout used when a ProgressBar is built without explicit widgets.
default_widgets = [Percentage(), ' ', Bar()]
class ProgressBar(object):
    """This is the ProgressBar class, it updates and prints the bar.
    The term_width parameter may be an integer. Or None, in which case
    it will try to guess it, if it fails it will default to 80 columns.
    The simple use is like this:
    >>> pbar = ProgressBar().start()
    >>> for i in xrange(100):
    ...    # do something
    ...    pbar.update(i+1)
    ...
    >>> pbar.finish()
    But anything you want to do is possible (well, almost anything).
    You can supply different widgets of any type in any order. And you
    can even write your own widgets! There are many widgets already
    shipped and you should experiment with them.
    When implementing a widget update method you may access any
    attribute or function of the ProgressBar object calling the
    widget's update method. The most important attributes you would
    like to access are:
    - currval: current value of the progress, 0 <= currval <= maxval
    - maxval: maximum (and final) value of the progress
    - finished: True if the bar is have finished (reached 100%), False o/w
    - start_time: first time update() method of ProgressBar was called
    - seconds_elapsed: seconds elapsed since start_time
    - percentage(): percentage of the progress (this is a method)
    """
    def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
                 fd=sys.stderr):
        assert maxval > 0
        self.maxval = maxval
        self.widgets = widgets
        self.fd = fd
        self.signal_set = False
        if term_width is None:
            # auto-detect the terminal width and track resizes via SIGWINCH;
            # fall back to 79 columns when the ioctl is unavailable
            try:
                self.handle_resize(None,None)
                signal.signal(signal.SIGWINCH, self.handle_resize)
                self.signal_set = True
            except:
                self.term_width = 79
        else:
            self.term_width = term_width
        self.currval = 0
        self.finished = False
        # forces the first update() to redraw
        self.prev_percentage = -1
        self.start_time = None
        self.seconds_elapsed = 0
    def handle_resize(self, signum, frame):
        # query the terminal size (rows, cols) through TIOCGWINSZ
        h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
        self.term_width = w
    def percentage(self):
        "Returns the percentage of the progress."
        return self.currval*100.0 / self.maxval
    def _format_widgets(self):
        # Render fixed-width widgets first, then distribute the remaining
        # columns evenly among the HFill widgets.
        r = []
        hfill_inds = []
        num_hfill = 0
        currwidth = 0
        for i, w in enumerate(self.widgets):
            if isinstance(w, ProgressBarWidgetHFill):
                r.append(w)
                hfill_inds.append(i)
                num_hfill += 1
            elif isinstance(w, (str, unicode)):
                r.append(w)
                currwidth += len(w)
            else:
                weval = w.update(self)
                currwidth += len(weval)
                r.append(weval)
        for iw in hfill_inds:
            r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
        return r
    def _format_line(self):
        return ''.join(self._format_widgets()).ljust(self.term_width)
    def _need_update(self):
        # only redraw when the integer percentage actually changed
        return int(self.percentage()) != int(self.prev_percentage)
    def update(self, value):
        "Updates the progress bar to a new value."
        assert 0 <= value <= self.maxval
        self.currval = value
        if not self._need_update() or self.finished:
            return
        if not self.start_time:
            self.start_time = time.time()
        self.seconds_elapsed = time.time() - self.start_time
        self.prev_percentage = self.percentage()
        if value != self.maxval:
            # '\r' keeps redrawing over the same terminal line
            self.fd.write(self._format_line() + '\r')
        else:
            self.finished = True
            self.fd.write(self._format_line() + '\n')
    def start(self):
        """Start measuring time, and prints the bar at 0%.
        It returns self so you can use it like this:
        >>> pbar = ProgressBar().start()
        >>> for i in xrange(100):
        ...    # do something
        ...    pbar.update(i+1)
        ...
        >>> pbar.finish()
        """
        self.update(0)
        return self
    def finish(self):
        """Used to tell the progress is finished."""
        self.update(self.maxval)
        if self.signal_set:
            # restore the default SIGWINCH handler we replaced in __init__
            signal.signal(signal.SIGWINCH, signal.SIG_DFL)
if __name__=='__main__':
    import os
    def example1():
        # rotating-marker bar with ETA and transfer speed
        widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print
    def example2():
        # custom widget subclassing FileTransferSpeed
        class CrazyFileTransferSpeed(FileTransferSpeed):
            "It's bigger between 45 and 80 percent"
            def update(self, pbar):
                if 45 < pbar.percentage() < 80:
                    return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
                else:
                    return FileTransferSpeed.update(self,pbar)
        widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
        pbar = ProgressBar(widgets=widgets, maxval=10000000)
        # maybe do something
        pbar.start()
        for i in range(2000000):
            # do something
            pbar.update(5*i+1)
        pbar.finish()
        print
    def example3():
        # a bar and a reverse bar on the same line
        widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
        pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
        for i in range(1000000):
            # do something
            pbar.update(10*i+1)
        pbar.finish()
        print
    def example4():
        # slow, coarse-grained updates with custom bar delimiters
        widgets = ['Test: ', Percentage(), ' ',
                   Bar(marker='0',left='[',right=']'),
                   ' ', ETA(), ' ', FileTransferSpeed()]
        pbar = ProgressBar(widgets=widgets, maxval=500)
        pbar.start()
        for i in range(100,500+1,50):
            time.sleep(0.2)
            pbar.update(i)
        pbar.finish()
        print
    # example1()
    # example2()
    # example3()
    # BUG FIX: the original called example5(), which is never defined and
    # raised NameError at import-as-script time; run the last defined demo.
    example4()
| Python |
# email_spam.py
#
# This file does the actual spam detection in Peter Ballard's email spam filter.
# The function check() takes in an email message (the "message" class from
# the standard Python module email), and returns a status
# ("wham", "mham", "spam" or "ham") as well as a one line description.
#
# Feel free to modify this program to your taste.
# But the intention is that you can use this program unchanged,
# and that you only need to modify email_defs.py
#
# Version 1.0, 4-Apr-2005.
#
#
#Copyright (C) 2005 Peter Ballard
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import string
import re
import email_defs
##################################################
# Functions for getting data from another file
##################################################
# each (whitespace separated) string in the file becomes a list element,
# unless the line begins with "#" or ";"
def file_to_list(filename):
    """Return every whitespace-separated token in *filename* as a list.

    Lines whose first token starts with "#" or ";" are comments and are
    skipped entirely.
    """
    # Fixes: shadowed builtin `list`; Python-2-only string.split() replaced
    # by the identical str method; `with` closes the file even on error.
    tokens = []
    with open(filename, "r") as fp:
        for line in fp:
            words = line.split()
            if len(words) and words[0][0]!="#" and words[0][0]!=";":
                tokens.extend(words)
    return tokens
##################################################
# Functions for separating out part of the message
##################################################
def getfield(msg, field):
    """Return the stripped value of header *field* of *msg*, or "" when
    the header is absent."""
    # Fixes Python-2-only msg.has_key()/string.strip(): msg.get() with a
    # None check is the equivalent spelling and also works on Python 3.
    value = msg.get(field)
    if value is None:
        return ""
    return value.strip()
def subject(msg):
    """Convenience wrapper: the message's Subject header ("" if absent)."""
    value = getfield(msg, "Subject")
    return value
# body includes subject,
# and bodies of all sub-messages if it's multipart
def body(msg):
    """Return the subject plus all body text of *msg* as one string."""
    # leading space and trailing newline make word-boundary regexps simpler
    pieces = [" " + subject(msg) + "\n"]
    if msg.is_multipart():
        for submsg in msg.get_payload():
            pieces.append(submsg.as_string())
    else:
        pieces.append(msg.get_payload())
    return "".join(pieces)
def extract_addresses(astr):
    """Return the lower-cased e-mail addresses found in *astr*, each
    followed by its domain suffixes down to two labels (e.g. for
    user@ozemail.com.au also "ozemail.com.au" and "com.au")."""
    # Fixes Python-2-only string.lower/split/join by the identical str
    # methods (no behaviour change on Python 2).
    alist = []
    for obj in re.finditer(r"[-\w\.]+@[-\w\.]+", astr):
        addr = obj.group().lower()
        alist.append(addr)
        # this little bit increases time from about 65 to 75 secs for 5500 msgs
        # it's probably worth it to get more generalised addr checking
        parts = addr.split("@")
        if len(parts)==2:
            bits = parts[1].split(".")
            for i in range(0, len(bits)-1):
                alist.append(".".join(bits[i:]))
    return alist
# doing domains like this instead of regexps reduces time from 88 to 61 secs for 5764 messages
def extract_urls(astr):
    """Return the lower-cased domains of http:// URLs in *astr*, plus
    every suffix of each domain down to a single label."""
    # Fixes Python-2-only string.lower/split/join by the identical str
    # methods (no behaviour change on Python 2).
    alist = []
    # checking for =2e doesnt cost much,
    # because some spams get identified quicker (before we reach regexps)
    for dot in [".", "=2e"]:
        for obj in re.finditer(r"http://[-\w" + dot + "]+", astr):
            addr = obj.group().lower()[7:] # strip 1st 7 chars i.e. "http://"
            # append all domain names,
            # down to domain names of one parts
            # e.g. http://ozemail.com.au it would add "ozemail.com.au" and "com.au" and "au"
            bits = addr.split(dot)
            for i in range(0, len(bits)):
                alist.append(".".join(bits[i:]))
    return alist
def recipient(msg):
    """Addresses (and domain suffixes) from the To and Cc headers."""
    to_part = extract_addresses(getfield(msg, "To"))
    cc_part = extract_addresses(getfield(msg, "Cc"))
    return to_part + cc_part
def sender(msg):
    """Addresses (and domain suffixes) from the From header, or []."""
    # Fixes Python-2-only msg.has_key(): "in" is the equivalent spelling.
    if "From" in msg:
        # already in lower case coming out of extract_address[es]()
        alist = extract_addresses(msg.get("From"))
        if len(alist):
            return alist
    return []
# Content-Type string for all attachments, if any
def msg_attachment_types(msg):
    """Return the Content-Type header of every sub-part of a multipart
    message; [] for a non-multipart message."""
    if not msg.is_multipart():
        return []
    retlist = []
    for submsg in msg.get_payload():
        # Fixes Python-2-only has_key(): "in" is the equivalent spelling.
        if "Content-Type" in submsg:
            retlist.append(submsg.get("Content-Type"))
    return retlist
##################################################
# Functions for actual spam matching
##################################################
def file_to_phrase_list(filename):
    """Return each non-comment line of *filename*, stripped, as a list.

    Lines whose first token starts with "#" or ";" are skipped; unlike
    file_to_list(), each remaining line is kept whole (a phrase).
    """
    # Fixes: shadowed builtin `list`; Python-2-only string.split()/strip()
    # replaced by the identical str methods; `with` closes the file.
    phrases = []
    with open(filename, "r") as fp:
        for line in fp:
            words = line.split()
            if len(words) and words[0][0]!="#" and words[0][0]!=";":
                phrases.append(line.strip())
    return phrases
def really_from_me(msg):
    # If the From address is one of my own, ask email_defs (via the
    # X-Mailer header) whether the mail genuinely came from my mailer.
    # Returns my address on success, "" when the X-Mailer check fails.
    # NOTE(review): falls through returning None when the sender is not
    # one of my addresses -- callers appear to use the result only for
    # truthiness, where "" and None behave alike; confirm.
    if find_listmember_in_list(email_defs.my_addresses, sender(msg)):
        xmailer = getfield(msg, "X-Mailer")
        if email_defs.really_from_me(xmailer):
            return email_defs.my_address
        else:
            return ""
def bad_attachment(msg, black_attachment_types):
    """Return a "##bad_attachment:.ext" tag if any attachment's
    Content-Type mentions a blacklisted extension, else 0."""
    for content_type in msg_attachment_types(msg):
        for ext in black_attachment_types:
            if re.search(r'\.' + ext, content_type):
                return "##bad_attachment:." + ext
    return 0
def non_letter():
    # regexp class: any character that is not a plain letter or digit
    return r"[^a-z0-9]"
def non_trickyletter():
    # like non_letter() but also excludes @ ! | which spammers substitute
    # for letters (see letteralt)
    return r"[^a-z0-9@!\|]"
def domainchars():
    # regexp class: characters found in a domain-name label
    return r"[a-z0-9_\-]"
def nondomainchars():
    # complement of domainchars()
    return r"[^a-z0-9_\-]"
def letteralt(ch):
    """Regexp character class matching *ch* plus the look-alike glyphs
    spammers substitute for it (e.g. 0 for o, @ for a)."""
    if ch=="a":
        # need to also add the accented a
        return "[a@]"
    if ch in ("i", "l", "1"):
        # with ascii chars>127, it seems that raw strings dont work
        # chr(0o356) is i with circumflex
        return "[il1!\\|" + chr(int("356",8)) + "]"
    if ch=="o":
        return "[o0]"
    return "[" + ch + "]"
def simpleregexp(word):
    """Pattern matching *word* delimited by non-letters on both sides."""
    # Renamed the parameter from the shadowed builtin `str`; all callers
    # in this module pass it positionally.
    return non_letter() + word + non_letter()
def trickyregexp(word):
    """Pattern matching *word* even when spammers pad it with separator
    characters and glyph substitutions (see letteralt)."""
    # Renamed the parameter from the shadowed builtin `str`; all callers
    # in this module pass it positionally.
    outstring = non_trickyletter() + letteralt(word[0])
    for ch in word[1:]:
        outstring = outstring + non_trickyletter() + "*?" + letteralt(ch)
    return outstring + non_trickyletter()
def findword(text, wordlist):
    """Return the delimited-word pattern of the first word of *wordlist*
    found in *text*, else 0."""
    # Renamed the first parameter from the shadowed builtin `str`.
    for word in wordlist:
        if re.search(simpleregexp(word), text):
            return simpleregexp(word)
    return 0
def findstring(text, wordlist):
    """Return the first member of *wordlist* contained in *text* (which
    may be a string or a list), else 0."""
    # Fixes Python-2-only string.count(); the `in` membership test is the
    # equivalent truth test for both strings and lists.  The first
    # parameter also no longer shadows the builtin `str`.
    for word in wordlist:
        if word in text:
            return word
    return 0
def findregexp(text, wordlist):
    """Return the first regexp in *wordlist* matching *text*, else 0."""
    # Renamed the first parameter from the shadowed builtin `str`.
    for pattern in wordlist:
        # MULTILINE kept from the original, which measured it as making no
        # performance or result difference on its corpus
        if re.search(pattern, text, re.MULTILINE):
            return pattern
    return 0
def find_in_list(str, list):
    """Return *str* if it is a member of *list*, else "".

    The "in" membership test is much faster than list.count()
    (118 -> 75 seconds on 5586 messages).
    """
    if str in list:
        return str
    return ""
def find_listmember_in_list(strlist, list):
    """Return the first member of *strlist* that occurs in *list*, else ""."""
    for candidate in strlist:
        if candidate in list:
            return candidate
    return ""
def bad_recipients(recipient_list):
    """Flag recipient lists that do not include me, or that spray my ISP domain."""
    if not find_listmember_in_list(email_defs.my_addresses, recipient_list):
        return "##recipient_not_me"
    if recipient_list.count(email_defs.my_isp_domain) >= 3:
        # three or more recipients at my ISP smells like an address sweep
        return "##many_my_isp_recipients"
    return ""
def bad_subject(msg):
    """Flag subjects that start with an encoded-word marker ("=?")."""
    if subject(msg)[:2] == "=?":
        return "=?"
    return ""
def empty_message(msg, body_mod):
    """Flag messages with neither a subject nor any body text."""
    if subject(msg) == "" and string.strip(body_mod) == "":
        return "##empty_message"
    return ""
##################################################
# The main entry program
##################################################
def check(msg):
    """Classify *msg* and return a (status, match) tuple.

    status is one of "wham" (whitelisted ham), "mham" ("marked" ham),
    "spam", or " ham" (the default); match is the rule that fired
    ("" when nothing matched).  Rules are tried cheapest-first and
    short-circuit via "or".
    """
    # strip any empty tags (i.e. <tag></tag>), and make lower case
    #body_notags = re.sub("<(?P<tag>[a-z]+)></(?P=tag)>", "", string.lower(body(msg)))
    bodylow = string.lower(body(msg))
    bodyearly = bodylow[:300] # the "intro": first 300 chars of the body
    thissender = sender(msg)
    # look for whitelisted ham (wham)
    whitematch = (find_listmember_in_list(thissender, whitelist)
                  or really_from_me(msg))
    if whitematch:
        return "wham", whitematch
    # looked for "marked" ham (mham)
    mwhitematch = (findstring(bodyearly, whiteintrostrings)
                   or findregexp(bodyearly, whiteintroregexps))
    if mwhitematch:
        return "mham", mwhitematch
    # bad_recipients gets most hits so do it first
    blackmatch = (bad_recipients(recipient(msg))
                  or findstring(bodylow, blackstrings)
                  or empty_message(msg, bodylow)
                  or bad_attachment(msg, black_attachment_types)
                  or bad_subject(msg)
                  or findstring(extract_urls(bodylow), blackdomains)
                  # regexps are costliest so check them last (sped up 1099 emails from 43 to 24 secs!)
                  or findregexp(bodylow, blackregexps))
    if blackmatch:
        return "spam", blackmatch
    # do this last. It looks like a good trick but only caught 3 of 1508 spams
    body_notags = re.sub("<.*?>", "", bodylow)
    blackmatch = findregexp(body_notags, blackregexps)
    if blackmatch:
        return "spam", blackmatch
    else:
        return " ham", ""
##################################################
# Some top level definitions
##################################################
# Module-level black/white tables, built once at import time from the raw
# word lists in email_defs plus the regexp helpers above.  Note that the
# appends below mutate email_defs.blackregexps in place.
trickywords = email_defs.trickywords
blackwords = email_defs.blackwords
blackphrases = email_defs.blackphrases
blackregexps = email_defs.blackregexps
for word in trickywords:
    # this saves 1/10000, and simplifies some output, but again about 4/3 slowdown
    blackregexps.append(simpleregexp(word))
    blackregexps.append(trickyregexp(word))
    # this catches very few extra cases (in fact 1: "meds!"), but increases time 61 -> 82 secs for 6643 msgs
    #blackregexps.append(simpleregexp(word))
for word in blackwords:
    blackregexps.append(simpleregexp(word))
for blackphrase in blackphrases:
    # NOTE(review): "list" shadows the builtin here; harmless at module
    # scope in this script, but worth renaming if ever touched again.
    list = string.split(string.strip(blackphrase))
    regexp = list[0]
    for i in range(1, len(list)):
        # phrase words may be separated by any run of non-letters
        #regexp = regexp + r"\s" + list[i]
        regexp = regexp + non_letter() + "+?" + list[i]
    blackregexps.append(regexp)
blackstrings = email_defs.blackstrings
blackdomains = file_to_list(email_defs.blackdomains)
black_attachment_types = email_defs.black_attachment_types
# words in the intro, or early in the body, which indicate wham
whiteintrowords = email_defs.whiteintrowords
whiteintroregexps = []
for word in whiteintrowords:
    whiteintroregexps.append(simpleregexp(word))
# strings in the subject line which indicate mail is wham
whiteintrostrings = email_defs.whiteintrostrings
whitelist = file_to_list(email_defs.whitelist)
| Python |
# email_defs.py
#
# This file is the "local definitions" part of Peter Ballard's email spam filter.
# Some of this you will need to change,
# and most of this you will want to change.
#
# Note the definitions for whitelist and blackdomains are not done here,
# but are in separate files. This is because these two lists can be quite
# long, and might even be created automatically. In these files,
# each (whitespace separated) string in the file becomes a list element,
# unless the line begins with "#" or ";"
# These files are not included in the package, because obviously I don't
# want to make my whitelist public.
#
# Version 1.0, 4-Apr-2005.
#
#Copyright (C) 2005 Peter Ballard
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#######################################
# definitions for email in and out files
#######################################
# All mail files share one basename; each classification gets its own suffix.
SYSMAILDIR = "/home/pballard/sysmail/"
MAILNAME = SYSMAILDIR + "pballard"
srcfilename = MAILNAME + ".raw"       # incoming mail, consumed each real run
storefilename = MAILNAME + ".store"   # permanent archive of everything read
spamfilename = MAILNAME + ".spam"
hamfilename = MAILNAME + ".ham"
whamfilename = MAILNAME + ".wham"     # whitelisted ham
mhamfilename = MAILNAME + ".mham"     # "marked" ham
panicfilename = MAILNAME + ".panic"   # messages that failed to parse
test_srcfilename = MAILNAME + ".store" # i.e. read back all the stored emails
#test_storefilename = "" # never used
test_spamfilename = "/dev/null"
# note these are written, not appended. Those parameters probably belong in here.
test_hamfilename = SYSMAILDIR + "test.ham"
test_whamfilename = SYSMAILDIR + "test.wham"
test_mhamfilename = SYSMAILDIR + "test.mham"
test_panicfilename = SYSMAILDIR + "test.panic"
#######################################
# Stuff used to detect spam
#######################################
# A few notes:
#
# "string" refers to a string
# "word" refers to a string bounded by whitespace
# "regexp" refers to a Python regular expression
# "phrase" is a set of words, separated by whitespace
# "intro" refers to the subject line plus the first 300 characters
# of the message body
#
# All text is (including email addresses) is converted to lower case.
#
# Lists of email names ("my_addresses" and "whitelist")
# may contain domain names also.
# The address this filter protects; mail not addressed to one of
# my_addresses is immediately suspect (see bad_recipients()).
my_address = "pballard@ozemail.com.au"
my_isp_domain = "ozemail.com.au"
# you can add secondary email addresses to this list of strings
my_addresses = [my_address]
# words in the intro, or early in the body, which indicate mham
# (Mine are edited out. Add your own).
whiteintrowords = [] # a list of strings
# strings in the subject line (or early in the body) which indicate mail is mham
# (Mine are edited out. Add your own).
whiteintrostrings = [] # a list of strings
MAILLISTDIR = "/home/pballard/mail/"
whitelist = MAILLISTDIR + "whitelist.txt"       # trusted senders, one per line
blackdomains = MAILLISTDIR + "blackdomains.txt" # spamvertised domains
# making most blackwords tricky (not just the usual suspects in the 1st and last lines)
# reduced (false) hams from 100 to 72,
# and increased time from 38 to 46 seconds for python 2.4a3
# (48 to 66 for python 2.2)
# for a corpus of 4149 messages (284 whams)
trickywords = ["xanax", "viagra", "cialis", "vicodin", "valium", "penis", "pharmacy",
               "incest", "explicit", "ejaculation", "porno", "erection", "erections",
               "orgasm", "pussy", "slut", "slutty", "sluts",
               "personals",
               "lottery",
               "insurance", "wholesale", "mastercard",
               "medication", "meds", "adipren", "prescription",
               "hydrocodone",
               "spyware",
               "diploma", "diplomas",
               "mortgage", "refinance",
               "rolex"]
# dont put mlm in tricky words, because it can be mistaken for "mim"
# which might be a valid word (e.g. "mim" is an Islamic word).
blackwords = ["elkedeseen", "mlm"]
# phrases match with any run of non-letters between the words (see the
# phrase loop in email_spam.py); "ultmost" is a deliberate misspelling
# variant as actually seen in spam.
blackphrases = ["improve your size",
                "rock hard",
                "3 inches",
                #"big dick",
                #"huge dick",
                "business offer",
                "business proposal",
                "future mailing",
                "further mailing",
                "business investment",
                "million dollars",
                "utmost confidentiality",
                "ultmost confidentiality",
                "bank transfer",
                "money transfer",
                "account details",
                "foreign accounts",
                "low price",
                "double your money",
                "money back guarantee",
                "credit card",
                "email database",
                "xp professional",
                "traders report",
                "stock dividend",
                "hot stocks",
                "active stock",
                "stock update",
                "lotto games",
                "want a watch", "cheap watch",
                "red light cameras", "speed cameras"]
blackregexps = [r"font-size:\s*[01][^0-9\.]", # tiny html font
                r"[a-z]<!.*?>[a-z]", # html comments in middle of a word
                r"http://[0-9]+\.[0-9]+\.[0-9]+\.", # numerical url
                #r"^ =\?", # subject begins with "=?" # covered in function bad_subject()
                r"pills[0-9]*\.gif"]
blackstrings = ["fuck",
                # currently non-alphanumerics go in strings only
                "auto-generated",
                # check for the string because it's often in a domain name
                "casino"]
# .zip is OK from whitelisted people, but some spammers send it
black_attachment_types = ["com", "cpl", "exe", "pif", "scr", "vbs", "bat", "zip"]
# a function to check the X-Mailer field (the string xmailer here)
# to see if it's really from me.
# Note, in contrast to everything else, the X-Mailer field has NOT
# been converted to lower case.
# I've put this in email_defs.py because it probably would vary wildly
# from user to user
def really_from_me(xmailer):
    """True if the X-Mailer header shows the mail was sent by my own mailer (Emacs)."""
    return "Emacs" == xmailer[:5]
| Python |
# email_filter.py
#
# This file is the "bookkeeping" part of Peter Ballard's email spam filter.
# This is the top level program.
# It reads the user options, opens and closes the files, and sends emails
# to the appropriate files.
# However it is the function check() in the module email_spam, which this
# program calls, which does the actual checking.
#
# Feel free to modify this program to your taste.
# But the intention is that you can use this program unchanged,
# and that you only need to modify email_defs.py
#
# Version 1.01, 7-Apr-2005:
# - all messages to stdout;
# - neater way to clear srcfilename
# Version 1.0, 4-Apr-2005.
#
#
#Copyright (C) 2005 Peter Ballard
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 2
#of the License, or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Standard Python modules
import sys
import string
import re
import email
import email.Errors
#import mailbox
import os
# Python modules which are part of this package
import email_defs
import email_spam
##########################################
# Set up the options
##########################################
# set up the defaults (for non-test mode)
REAL = 1          # 1 = real run (archive and empty the source); 0 = -test mode
srcfilename = email_defs.srcfilename
storefilename = email_defs.storefilename
spamfilename = email_defs.spamfilename
hamfilename = email_defs.hamfilename
whamfilename = email_defs.whamfilename
mhamfilename = email_defs.mhamfilename
panicfilename = email_defs.panicfilename
# real mode appends to the classification files; -test mode overwrites
spammode = "ab"
hammode = "ab"
whammode = "ab"
mhammode = "ab"
panicmode = "ab"
detail = 0        # 1 = print per-rule hit counts at the end
summary = 1       # 1 = print the one-line totals
verbose = 1       # 1 = print one line per message
limit = 0         # if nonzero, only process the last "limit" messages
argv = sys.argv
# -u: print a usage message and exit
if argv.count("-u"):
    sys.stdout.write("python ~/software/email_filter.py")
    sys.stdout.write(" [-test] \\\n")
    sys.stdout.write(" [-src srcfilename] (default is " + srcfilename + ") \\\n")
    sys.stdout.write(" [-[a]spam spamfilename] (default is " + spamfilename + ") \\\n")
    sys.stdout.write(" [-[a]ham hamfilename] (default is " + hamfilename + ") \\\n")
    sys.stdout.write(" [-[a]wham whamfilename] (default is " + whamfilename + ") \\\n")
    # Fix: this line used to advertise "whamfilename" for the -mham option
    sys.stdout.write(" [-[a]mham mhamfilename] (default is " + mhamfilename + ") \\\n")
    sys.stdout.write(" [-[a]panic panicfilename] (default is " + panicfilename + ") \\\n")
    sys.stdout.write(" [-limit n] (default is " + str(limit) + ") \\\n")
    sys.stdout.write(" [-detail n] (default is " + str(detail) + ") \\\n")
    sys.stdout.write(" [-summary n] (default is " + str(summary) + ") \\\n")
    # Fix: the last line ended with ")n" - a missing backslash on "\n"
    sys.stdout.write(" [-verbose n] (default is " + str(verbose) + ")\n")
    sys.exit(0)
if argv.count("-test"):
    # A different set of defaults for test mode: re-read the stored mail,
    # overwrite ("wb") instead of appending, and show full detail.
    REAL = 0
    argv.remove("-test")
    srcfilename = email_defs.test_srcfilename
    spamfilename = email_defs.test_spamfilename
    hamfilename = email_defs.test_hamfilename
    whamfilename = email_defs.test_whamfilename
    mhamfilename = email_defs.test_mhamfilename
    panicfilename = email_defs.test_panicfilename
    spammode = "wb"
    hammode = "wb"
    whammode = "wb"
    mhammode = "wb"
    panicmode = "wb"
    detail = 1
    summary = 1
    verbose = 1
    limit = 0
# every name that may be overridden from the command line in -test mode.
# Fix: this list used to contain "panicname" instead of "panicfilename",
# so the -panicfilename option was always rejected as "Bad input option".
options = ["srcfilename", "spamfilename", "whamfilename", "mhamfilename",
           "hamfilename", "panicfilename",
           "spammode", "whammode", "mhammode", "hammode", "panicmode",
           "detail", "summary", "verbose", "limit"]
if REAL==0:
    i = 1
    while i<len(argv):
        if i+1<len(argv) and argv[i][0]=="-" and (argv[i][1:] in options):
            # evaluate - no checking here!  (exec of a command-line value:
            # acceptable only because this is a single-user local script)
            exec (argv[i][1:] + " = " + argv[i+1])
            i += 2
        else:
            raise Exception("Bad input option " + argv[i])
elif len(argv)>1:
    raise Exception("Option " + argv[1] + " only works with -test")
##########################################
# open the various files
##########################################
# Source mailbox is read; classification files are opened for append ("ab")
# in real mode or overwrite ("wb") in -test mode.
fp = open(srcfilename, "rb")
spamfile = open(spamfilename, spammode)
hamfile = open(hamfilename, hammode)
if whamfilename==hamfilename:
    # wham and ham share a file: alias the handle instead of opening twice
    whamfile = hamfile
else:
    whamfile = open(whamfilename, whammode)
if mhamfilename!=whamfilename:
    # mham is always also written to whamfile; a separate mham file is
    # only opened when it actually differs
    mhamfile = open(mhamfilename, mhammode)
panicfile = open(panicfilename, panicmode)
##########################################
# Read the source file (fp),
# Creating a list, msgs.
# Each list item is a string which is a single email message.
##########################################
msgs = [] # each list element is an email message
thismsg = [] # each list element is a line
while 1:
    line = fp.readline()
    if line=="":
        # EOF
        break
    if len(line)>=5 and line[:5]=="From ":
        # mbox convention: a line beginning "From " starts a new message
        if len(thismsg):
            # this is MUCH faster than a loop of string concatenations,
            # because a big (immutable) string is only built once.
            msgs.append(string.join(thismsg, ""))
        thismsg = [line]
    else:
        thismsg.append(line)
# end condition: flush the final message
if len(thismsg):
    msgs.append(string.join(thismsg, ""))
fp.close()
# shorten the list if limit is specified.
# Perhaps this should be done dynamically while reading fp,
# So that msgs does not get ridiculously long.
if limit:
    msgs = msgs[-limit:]
##########################################
# Setup up the arrays WHITEHITS and BLACKHITS
# These are only used in the detailed summary if detail==1
##########################################
# Map each known rule (whitelist entry, regexp, string, attachment type)
# to the number of messages it matched.  check() can also return ad-hoc
# "##..." markers, which get added to these dicts on first hit.
WHITEHITS = {}
BLACKHITS = {}
for word in email_spam.whitelist + email_spam.whiteintroregexps + email_spam.whiteintrostrings:
    WHITEHITS[word] = 0
for word in (email_spam.blackstrings + email_spam.blackregexps + email_spam.blackdomains
             + email_spam.black_attachment_types):
    BLACKHITS[word] = 0
##########################################
# process each email using results from the email_spam module.
# The algorithm is:
# if (message cannot be parsed):
#    it's a "panic"
# elif (sender is in whitelist):
#    it's "wham"
# elif (mail contains a string indicating it's good):
#    it's "mham"
# elif (mail looks like spam):
#    it's "spam"
# else:
#    it's "ham"
##########################################
spams = 0
hams = 0
whams = 0
mhams = 0
panics = 0
for msgstring in msgs:
    try:
        msg = email.message_from_string(msgstring)
    except:
        # unparseable mail goes to the panic file for manual inspection
        if verbose:
            sys.stdout.write("panic\n")
        panicfile.write(msgstring)
        panics += 1
        continue
    (status, matchword) = email_spam.check(msg)
    if status=="wham":
        whams = whams + 1
        if whamfile:
            whamfile.write(msgstring)
            #Do this to add a header...
            #msg.add_header("X-diyfilter", "wham")
            #whamfile.write(msg.as_string(1))
        if not WHITEHITS.has_key(matchword):
            WHITEHITS[matchword] = 1
        else:
            WHITEHITS[matchword] += 1
    elif status=="mham":
        mhams = mhams + 1
        # always write to whamfile, and optionally ALSO to mhamfile
        # (as a record of addresses that need to be whitelisted)
        whamfile.write(msgstring)
        if mhamfilename!=whamfilename:
            #Do this to add a header...
            #msg.add_header("X-diyfilter", "mham")
            #mhamfile.write(msg.as_string(1))
            mhamfile.write(msgstring)
        if not WHITEHITS.has_key(matchword):
            WHITEHITS[matchword] = 1
        else:
            WHITEHITS[matchword] += 1
    elif status=="spam":
        spams = spams+1
        #Do this to add a header...
        #msg.add_header("X-diyfilter", "spam")
        #spamfile.write(msg.as_string(1))
        spamfile.write(msgstring)
        if not BLACKHITS.has_key(matchword):
            BLACKHITS[matchword] = 1
        else:
            BLACKHITS[matchword] += 1
    else:
        #Do this to add a header...
        #msg.add_header("X-diyfilter", "ham")
        #hamfile.write(msg.as_string(1))
        hamfile.write(msgstring)
        hams = hams+1
    if verbose:
        # print a one-line summary per email
        sys.stdout.write(status + "\t" + matchword + "\t")
        sys.stdout.write(email_spam.subject(msg) + "\n")
if summary:
    # backquotes are Python 2's repr() shorthand
    sys.stdout.write(`len(msgs)` + " messages: "
                     + `whams` + " whams, " + `mhams` + " mhams, "
                     + `hams` + " hams, " + `spams` + " spams, "
                     + `panics` + " panics\n")
spamfile.close()
hamfile.close()
panicfile.close()
# only close handles we actually opened (shared files were aliased above)
if whamfilename!=hamfilename:
    whamfile.close()
if mhamfilename!=whamfilename:
    mhamfile.close()
##########################################
# For real email reading (as opposed to testing)
# empty srcfilename into storefilename.
##########################################
if REAL:
    # append the processed mail to the permanent store, then truncate the source
    os.system("cat " + srcfilename + " >> " + storefilename)
    # reduce srcfilename to zero size
    fp = open(srcfilename, "w")
    fp.close()
    # the old way
    #os.system("/bin/rm -f " + srcfilename)
    #os.system("touch " + srcfilename)
##########################################
# detailed summary, if requested
##########################################
if detail:
    # print per-rule hit counts, most-hit first;
    # "+" marks white rules, "-" marks black rules
    for marker in ["+", "-"]:
        if marker=="+":
            dict = WHITEHITS
        else:
            dict = BLACKHITS
        tuples = []
        for key in dict.keys():
            tuples.append((dict[key], key))
        tuples.sort()
        tuples.reverse()
        for tuple in tuples:
            sys.stdout.write(marker + `tuple[0]` + "\t" + tuple[1] + "\n")
| Python |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import poplib
import imaplib, email
import filters
def main():
    """Fetch the Gmail inbox (IMAP by default, POP3 otherwise) and run the
    keyword filter over each message, printing a verdict per mail."""
    imap=True
    # NOTE(review): credentials are hard-coded in source - move them to a
    # config file or environment variable before sharing this script.
    username = 'nss.automatic.message@gmail.com'
    password = 'ThisIsMyPassword'
    keyFilt = filters.kwordFilter()
    if imap:
        M=imaplib.IMAP4_SSL('imap.gmail.com', 993)
        M.login(username,password)
        status, count = M.select('Inbox')
        # fetch each message in full (RFC822) and classify it
        for n in range(int(count[0])):
            status, data = M.fetch(str(n+1), '(RFC822)')
            msg = email.message_from_string(data[0][1])
            print msg['From']
            if keyFilt.isSpam(msg):
                print "Attention SPAM"
            else:
                print "Not identified as SPAM"
        M.close()
        M.logout()
    else:
        # POP3 path only lists subjects; it does not run the filter
        mServer = poplib.POP3_SSL('pop.gmail.com')
        mServer.user(username)
        mServer.pass_(password)
        numMessages = len(mServer.list()[1])
        print "You have %d messages." % (numMessages)
        print "Message List:"
        #List the subject line of each message
        for mList in range(numMessages) :
            for msg in mServer.retr(mList+1)[1]:
                if msg.startswith('Subject'):
                    print '\t' + msg
                    break
        mServer.quit()
if __name__ == "__main__":
    main()
| Python |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
import sys
sys.path.append("~/5a-bucarest-vallee-cedric/asois/project")
import orange
class Filter:
    """Abstract base class for mail filters.

    Fix: isSpam() and userDecision() were declared without *self*, so
    calling them on an instance raised TypeError; both are instance
    methods now.  print statements use the parenthesised form, which
    behaves identically on Python 2 and 3.
    """
    def __init__(self):
        print("init filt")
        # path of the data file backing this filter ("" for the base class)
        self.file = ""
    def isSpam(self, mail):
        """Return True if *mail* is judged to be spam.  Subclasses override."""
        pass
    def userDecision(self):
        """Hook for interactive user feedback.  Subclasses override."""
        pass
class kwordFilter(Filter):
    """Filter that flags a mail as spam when it contains any blocked keyword."""
    def __init__(self):
        print("init keyword filter")
        # one blocked keyword per line; strip the newlines while loading
        self.file = "filters/contentBlock.txt"
        f = open(self.file, 'r')
        raw_lines = f.readlines()
        f.close()
        self.list = [s.replace("\n", "") for s in raw_lines]
    def isSpam(self, mail):
        """True if any blocked keyword occurs in the text of *mail*."""
        text = str(mail)
        for keyword in self.list:
            if keyword in text:
                return True
        return False
if __name__ == "__main__":
    # smoke test: constructing the filter loads filters/contentBlock.txt
    obj = kwordFilter()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
			UploadFileCommandMixin,
			BaseHttpMixin, BaseHtmlMixin):
	"""Connector that handles the FCKeditor 'QuickUpload' request."""
	def doResponse(self):
		"""Process the request, set headers and return the response body string."""
		# Check if this connector is disabled
		if not(Config.Enabled):
			return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
		command = 'QuickUpload'
		# The file type (from the QueryString, by default 'File').
		resourceType  = self.request.get('Type','File')
		currentFolder = "/"
		# NOTE(review): currentFolder is hard-coded just above, so this
		# check can never fire; kept for parity with the full connector.
		if currentFolder is None:
			return self.sendUploadResults(102, '', '', "")
		# Check if it is an allowed command
		if ( not command in Config.ConfigAllowedCommands ):
			return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
		if ( not resourceType in Config.ConfigAllowedTypes ):
			return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
		# Setup paths
		self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
		self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
		if not self.userFilesFolder: # no absolute path given (dangerous...)
			self.userFilesFolder = mapServerPath(self.environ,
								self.webUserFilesFolder)
		# Ensure that the directory exists.
		if not os.path.exists(self.userFilesFolder):
			try:
				# Fix: was self.createServerFoldercreateServerFolder - a
				# doubled name that raised AttributeError on the first
				# upload into a missing directory.
				self.createServerFolder( self.userFilesFolder )
			except:
				return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
		# File upload doesn't have to return XML, so intercept here
		return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
	try:
		# Create a Connector Instance
		conn = FCKeditorQuickUpload()
		data = conn.doResponse()
		# emit the HTTP headers collected by the connector, then the body
		for header in conn.headers:
			if not header is None:
				print '%s: %s' % header
		print
		print data
	except:
		# on any failure fall back to a plain-text traceback for the browser
		print "Content-Type: text/plain"
		print
		import cgi
		cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=None):
	"""
	Convert the special characters '&', '<', '>' and '"' in *text* to
	their HTML entities (&amp; &lt; &gt; &quot;) per RFC 1866, so the
	result can be embedded safely in XML/HTML attribute values.

	Fix: the replacement targets had degraded into no-op
	self-replacements (e.g. replacing '&' with '&'), which left the
	output completely unescaped.  The old default replace=string.replace
	also fails to load on Python 3; passing a custom *replace* callable
	is still supported for backward compatibility.
	"""
	if replace is None:
		def replace(s, old, new):
			return s.replace(old, new)
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	return text
def convertToXmlAttribute(value):
	"""Return *value* escaped for embedding in an XML attribute; None becomes ""."""
	if value is None:
		value = ""
	return escape(value)
class BaseHttpMixin(object):
	"""Mixin providing HTTP response-header helpers for the connectors."""
	def setHttpHeaders(self, content_type='text/xml'):
		"""Prepare the standard no-cache response headers plus Content-Type."""
		# Prevent the browser from caching the result:
		# a date in the past ...
		self.setHeader('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT')
		# ... always modified ...
		self.setHeader('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()))
		# ... HTTP/1.1 ...
		self.setHeader('Cache-Control', 'no-store, no-cache, must-revalidate')
		self.setHeader('Cache-Control', 'post-check=0, pre-check=0')
		# ... and HTTP/1.0.
		self.setHeader('Pragma', 'no-cache')
		# Set the response format.
		self.setHeader('Content-Type', content_type + '; charset=utf-8')
		return
class BaseXmlMixin(object):
	"""Mixin that builds the XML envelope returned by the connector."""
	def createXmlHeader(self, command, resourceType, currentFolder, url):
		"""Return the XML declaration, the open Connector node and the
		CurrentFolder node (headers are set as a side effect)."""
		self.setHttpHeaders()
		parts = []
		# the XML document header
		parts.append("""<?xml version="1.0" encoding="utf-8" ?>""")
		# the main connector node
		parts.append("""<Connector command="%s" resourceType="%s">""" % (
			command,
			resourceType
		))
		# the current folder node
		parts.append("""<CurrentFolder path="%s" url="%s" />""" % (
			convertToXmlAttribute(currentFolder),
			convertToXmlAttribute(url),
		))
		return "".join(parts)
	def createXmlFooter(self):
		"""Return the closing Connector tag."""
		return """</Connector>"""
	def sendError(self, number, text):
		"""Return a complete XML error document for error *number*."""
		self.setHttpHeaders()
		return ("""<?xml version="1.0" encoding="utf-8" ?>""" +
				"""<Connector>""" +
				self.sendErrorNode(number, text) +
				"""</Connector>""")
	def sendErrorNode(self, number, text):
		"""Return a single Error node; the message text is only included
		for the generic error number 1."""
		if number == 1:
			return """<Error number="%s" text="%s" />""" % (number, convertToXmlAttribute(text))
		return """<Error number="%s" />""" % (number)
class BaseHtmlMixin(object):
	"""Mixin returning the HTML/JS snippet that reports upload results."""
	def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
		"""Send the results of the uploading process back to the opener page.

		The returned script first applies the minified document.domain
		automatic fix (see _dev/domain_fix_template.js and ticket #1919),
		then calls window.parent.OnUploadCompleted(...).
		"""
		# Fix: these explanations used to sit as bare string statements
		# *after* the first real statement - not even docstrings, just
		# dead code; they are a proper docstring/comments now.
		self.setHttpHeaders("text/html")
		return """<script type="text/javascript">
(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
</script>""" % {
			'errorNumber': errorNo,
			'fileUrl': fileUrl.replace ('"', '\\"'),
			'fileName': fileName.replace ( '"', '\\"' ) ,
			'customMsg': customMsg.replace ( '"', '\\"' ),
		}
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
	"""Return *fileName* without its final extension (text after the last dot).

	Raises ValueError if there is no dot, as rindex() did originally.
	"""
	return fileName[:fileName.rindex(".")]
def getExtension(fileName):
	"""Return the extension of *fileName* (text after the last dot)."""
	return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
	"""Strip every leading occurrence of *char* from *string*."""
	return string.lstrip(char)
def removeFromEnd(string, char):
	"""Strip every trailing occurrence of *char* from *string*."""
	return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
	"""Join *basePath* and *folder* with exactly one '/' between them."""
	return removeFromEnd(basePath, '/') + '/' + removeFromStart(folder, '/')
def getFileName(filename):
	"""Return the last path component of *filename*.

	Handles both '/' and '\\' separators (client may be on Windows).
	"""
	for sep in ("/", "\\"):
		pieces = filename.split(sep)
		if len(pieces) > 1:
			filename = pieces[-1]
	return filename
# Characters that may not appear in a folder name:
# . \ / | : ? * " < > and control characters (compiled once).
_unsafeFolderChars = re.compile( '\\.|\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\x00-\x1f\x7f-\x9f]' )
def sanitizeFolderName( newFolderName ):
	"""Replace every unsafe character in *newFolderName* with '_'."""
	return _unsafeFolderChars.sub( '_', newFolderName )
def sanitizeFileName( newFileName ):
	"""Clean up *newFileName* to avoid path tricks and unsafe characters.

	Replaces \\ / | : ? * " < > and control characters with '_'; when
	Config.ForceSingleExtension is set, every dot but the last also
	becomes '_' (so "a.php.jpg" cannot smuggle a second extension).
	Directory components are stripped.
	"""
	if ( Config.ForceSingleExtension ): # remove dots
		newFileName = re.sub ( '\\.(?![^.]*$)', '_', newFileName )
	newFileName = newFileName.replace('\\','/')	# convert windows to unix path
	newFileName = os.path.basename (newFileName)	# strip directories
	# Remove \ / | : ? * " < > and control characters.
	# Fix: the pattern used to end "[\x00-\x1f\x7f-\x9f]/", so a bare
	# control character (not followed by '/') slipped through unreplaced;
	# this now matches the folder-name variant above.
	return re.sub ( '\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFileName )
def getCurrentFolder(currentFolder):
	"""Normalize and validate a client-supplied folder path.

	Returns the path with exactly one leading and one trailing '/', or
	None when the path is invalid (contains '..', '\\', or other
	forbidden characters/patterns).
	"""
	if not currentFolder:
		currentFolder = '/'
	# Check the current folder syntax (must begin and end with a slash).
	# Fix: "<>" was the long-deprecated Python 2 spelling of "!=" and is
	# a syntax error on Python 3; the semantics are identical.
	if (currentFolder[-1] != "/"):
		currentFolder += "/"
	if (currentFolder[0] != "/"):
		currentFolder = "/" + currentFolder
	# Ensure the folder path has no double-slashes
	while '//' in currentFolder:
		currentFolder = currentFolder.replace('//','/')
	# Check for invalid folder paths (..)
	if '..' in currentFolder or '\\' in currentFolder:
		return None
	# Check for other forbidden patterns and characters
	if re.search( '(/\\.)|(//)|([\\\\:\\*\\?\\""\\<\\>\\|]|[\x00-\x1F]|[\x7f-\x9f])', currentFolder ):
		return None
	return currentFolder
def mapServerPath( environ, url):
	"""Emulate the asp Server.mapPath function: map a URL path to the
	physical directory it corresponds to.

	This isn't fully correct: if this script lives under a virtual
	directory or a symlink, getRootPath() detects the problem and stops.
	"""
	return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
	"""Join a resource-type root with a folder path."""
	return combinePaths( resourceTypePath, folderPath )
def getRootPath(environ):
	"""Return the server's document-root physical path.

	WARNING: this may not be thread safe, and doesn't work with
	VirtualServer/mod_python - use Config.UserFilesAbsolutePath instead.
	"""
	if 'DOCUMENT_ROOT' in environ:	# has_key() was Python-2-only
		return environ['DOCUMENT_ROOT']
	# No DOCUMENT_ROOT: derive it from the script's own location.
	realPath = os.path.realpath( './' )
	selfPath = environ['SCRIPT_FILENAME']
	selfPath = selfPath [ : selfPath.rfind( '/' ) ]
	selfPath = selfPath.replace( '/', os.path.sep)
	position = realPath.find(selfPath)
	# This can only check that the script isn't run from a virtual dir,
	# but it avoids the problems that arise if it isn't checked.
	# Fix: a stray debug "raise realPath" used to sit here, making the
	# check below unreachable (and raising a plain string is itself
	# invalid in modern Python).
	if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
		raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
	return realPath[ : position ]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the "File Uploader" for Python
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorQuickUpload( FCKeditorConnectorBase,
			UploadFileCommandMixin,
			BaseHttpMixin, BaseHtmlMixin):
	"""Connector that handles the FCKeditor 'QuickUpload' request."""
	def doResponse(self):
		"""Process the request, set headers and return the response body string."""
		# Check if this connector is disabled
		if not(Config.Enabled):
			return self.sendUploadResults(1, "This file uploader is disabled. Please check the \"editor/filemanager/connectors/py/config.py\"")
		command = 'QuickUpload'
		# The file type (from the QueryString, by default 'File').
		resourceType  = self.request.get('Type','File')
		currentFolder = "/"
		# NOTE(review): currentFolder is hard-coded just above, so this
		# check can never fire; kept for parity with the full connector.
		if currentFolder is None:
			return self.sendUploadResults(102, '', '', "")
		# Check if it is an allowed command
		if ( not command in Config.ConfigAllowedCommands ):
			return self.sendUploadResults( 1, '', '', 'The %s command isn\'t allowed' % command )
		if ( not resourceType in Config.ConfigAllowedTypes ):
			return self.sendUploadResults( 1, '', '', 'Invalid type specified' )
		# Setup paths
		self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
		self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
		if not self.userFilesFolder: # no absolute path given (dangerous...)
			self.userFilesFolder = mapServerPath(self.environ,
								self.webUserFilesFolder)
		# Ensure that the directory exists.
		if not os.path.exists(self.userFilesFolder):
			try:
				# Fix: was self.createServerFoldercreateServerFolder - a
				# doubled name that raised AttributeError on the first
				# upload into a missing directory.
				self.createServerFolder( self.userFilesFolder )
			except:
				return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
		# File upload doesn't have to return XML, so intercept here
		return self.uploadFile(resourceType, currentFolder)
# Running from command line (plain old CGI)
if __name__ == '__main__':
	try:
		# Create a Connector Instance
		conn = FCKeditorQuickUpload()
		data = conn.doResponse()
		# Emit the collected HTTP headers, then a blank separator line,
		# then the response body (standard CGI response layout).
		for header in conn.headers:
			if not header is None:
				print '%s: %s' % header
		print
		print data
	except:
		# On any failure, fall back to a plain-text traceback so the CGI
		# response stays well-formed and debuggable.
		print "Content-Type: text/plain"
		print
		import cgi
		cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase( object ):
	"The base connector class. Subclass it to extend functionality (see Zope example)"
	def __init__(self, environ=None):
		"Constructor: parse the request fields and initialize state."
		self.request = FCKeditorRequest(environ)  # parse the incoming request
		self.headers = []  # start with no queued response headers
		# Fall back to the process environment when no WSGI environ is given
		# (plain-CGI operation).
		self.environ = environ or os.environ
	# local functions
	def setHeader(self, key, value):
		"Queue an HTTP response header to be emitted with the response."
		self.headers.append((key, value))
class FCKeditorRequest(object):
	"A wrapper around the request object"
	def __init__(self, environ):
		# environ is a WSGI environ dict when running under WSGI, or
		# None/falsy when running as a plain CGI script.
		if environ: # WSGI
			self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
							environ=environ,
							keep_blank_values=1)
			self.environ = environ
		else: # plain old cgi
			self.environ = os.environ
			self.request = cgi.FieldStorage()
		if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
			if self.environ['REQUEST_METHOD'].upper()=='POST':
				# we are in a POST, but GET query_string exists
				# cgi parses by default POST data, so parse GET QUERY_STRING too
				self.get_request = cgi.FieldStorage(fp=None,
							environ={
							'REQUEST_METHOD':'GET',
							'QUERY_STRING':self.environ['QUERY_STRING'],
							},
							)
			else:
				self.get_request={}
		# NOTE(review): if REQUEST_METHOD or QUERY_STRING is absent from the
		# environment, self.get_request is never assigned and has_key()/get()
		# would raise AttributeError -- confirm all target servers set both.
	def has_key(self, key):
		# A field may arrive in the POST body or in the GET query string.
		return self.request.has_key(key) or self.get_request.has_key(key)
	def get(self, key, default=None):
		# POST fields take precedence over GET fields.
		if key in self.request.keys():
			field = self.request[key]
		elif key in self.get_request.keys():
			field = self.get_request[key]
		else:
			return default
		if hasattr(field,"filename") and field.filename: #file upload, do not convert return value
			return field
		else:
			return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recommended)
def App(environ, start_response):
	"WSGI entry point. Run the connector"
	# Dispatch on the requested script name: the same WSGI app serves both
	# the browser connector and the quick-upload endpoint.
	if environ['SCRIPT_NAME'].endswith("connector.py"):
		conn = FCKeditorConnector(environ)
	elif environ['SCRIPT_NAME'].endswith("upload.py"):
		conn = FCKeditorQuickUpload(environ)
	else:
		# Unrecognized endpoint: answer 200 with a short diagnostic body.
		start_response ("200 Ok", [('Content-Type','text/html')])
		yield "Unknown page requested: "
		yield environ['SCRIPT_NAME']
		return
	try:
		# run the connector
		data = conn.doResponse()
		# Start WSGI response:
		start_response ("200 Ok", conn.headers)
		# Send response text
		yield data
	except:
		# Render the traceback as HTML via cgitb rather than letting the
		# exception escape into the server.
		start_response("500 Internal Server Error",[("Content-type","text/html")])
		file = StringIO()
		cgitb.Hook(file = file).handle()
		yield file.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Utility functions for the File Manager Connector for Python
"""
import string, re
import os
import config as Config
# Generic manipulation functions
def removeExtension(fileName):
	"Return fileName without its final extension (the text after the last dot)."
	dotPosition = fileName.rindex(".")
	return fileName[:dotPosition]
def getExtension(fileName):
	"Return the text after the final dot of fileName (the extension, no dot)."
	return fileName[fileName.rindex(".") + 1:]
def removeFromStart(string, char):
	"Strip every leading occurrence of char (str.lstrip semantics)."
	# Note: the parameter deliberately shadows the imported "string" module
	# inside this function; kept as-is for interface compatibility.
	return string.lstrip(char)
def removeFromEnd(string, char):
	"Strip every trailing occurrence of char (str.rstrip semantics)."
	# Note: the parameter shadows the imported "string" module; kept as-is.
	return string.rstrip(char)
# Path functions
def combinePaths( basePath, folder ):
	"Join two path fragments with exactly one slash between them."
	return "%s/%s" % (removeFromEnd(basePath, '/'), removeFromStart(folder, '/'))
def getFileName(filename):
	" Purpose: helper function to extrapolate the filename "
	# Keep only the last component after either kind of path separator.
	for separator in ("/", "\\"):
		parts = filename.split(separator)
		if len(parts) > 1:
			filename = parts[-1]
	return filename
def sanitizeFolderName( newFolderName ):
	"Replace characters that are unsafe in folder names with underscores."
	# Disallowed: . \ / | : ? * " < > and ASCII control characters
	# (same set as the original alternation, expressed as one character class).
	return re.sub(r'[.\\/|:?*"<>\x00-\x1f\x7f-\x9f]', '_', newFolderName)
def sanitizeFileName( newFileName ):
	"Do a cleanup of the file name to avoid possible problems"
	# Replace dots in the name with underscores (only one dot can be there... security issue).
	if ( Config.ForceSingleExtension ): # remove dots
		newFileName = re.sub ( '\\.(?![^.]*$)', '_', newFileName )
	newFileName = newFileName.replace('\\','/') # convert windows to unix path
	newFileName = os.path.basename (newFileName) # strip directories
	# Remove \ / | : ? * " < > and control characters.
	# BUG FIX: the pattern previously ended with "[\x00-\x1f\x7f-\x9f]/",
	# so a control character was only replaced when immediately followed
	# by a slash -- bare control characters survived sanitization.
	return re.sub ( '\\\\|\\/|\\||\\:|\\?|\\*|"|<|>|[\x00-\x1f\x7f-\x9f]', '_', newFileName )
def getCurrentFolder(currentFolder):
	"""Normalize and validate a client-supplied folder path.

	Returns the folder wrapped in leading/trailing slashes with duplicate
	slashes collapsed, or None when the path is invalid (directory
	traversal, backslashes, or forbidden characters).
	"""
	if not currentFolder:
		currentFolder = '/'
	# Check the current folder syntax (must begin and end with a slash).
	# NOTE: the deprecated Python 2 "<>" operator was replaced with "!=",
	# which behaves identically and is also valid in Python 3.
	if (currentFolder[-1] != "/"):
		currentFolder += "/"
	if (currentFolder[0] != "/"):
		currentFolder = "/" + currentFolder
	# Ensure the folder path has no double-slashes
	while '//' in currentFolder:
		currentFolder = currentFolder.replace('//','/')
	# Check for invalid folder paths (.. traversal or backslashes)
	if '..' in currentFolder or '\\' in currentFolder:
		return None
	# Check for forbidden characters anywhere in the normalized path
	if re.search( '(/\\.)|(//)|([\\\\:\\*\\?\\""\\<\\>\\|]|[\x00-\x1F]|[\x7f-\x9f])', currentFolder ):
		return None
	return currentFolder
def mapServerPath( environ, url):
	" Emulate the asp Server.mapPath function. Given an url path return the physical directory that it corresponds to "
	# This isn't correct but for the moment there's no other solution
	# If this script is under a virtual directory or symlink it will detect the problem and stop
	# (getRootPath raises in that case; see getRootPath below).
	return combinePaths( getRootPath(environ), url )
def mapServerFolder(resourceTypePath, folderPath):
	"Join a resource-type root path with a folder path, normalizing slashes."
	return combinePaths(resourceTypePath, folderPath)
def getRootPath(environ):
	"""Purpose: returns the root path on the server.

	Prefers the web server's DOCUMENT_ROOT; otherwise derives the root by
	stripping the script's URL path from its real filesystem path.
	Raises Exception when the mapping cannot be computed (e.g. the script
	runs under a virtual directory or a symlink).
	"""
	# WARNING: this may not be thread safe, and doesn't work w/ VirtualServer/mod_python
	# Use Config.UserFilesAbsolutePath instead
	if 'DOCUMENT_ROOT' in environ:
		return environ['DOCUMENT_ROOT']
	else:
		realPath = os.path.realpath( './' )
		selfPath = environ['SCRIPT_FILENAME']
		selfPath = selfPath [ : selfPath.rfind( '/' ) ]
		selfPath = selfPath.replace( '/', os.path.sep)
		position = realPath.find(selfPath)
		# This can check only that this script isn't run from a virtual dir
		# But it avoids the problems that arise if it isn't checked.
		# BUG FIX: a stray debug statement "raise realPath" stood here,
		# which made this whole fallback branch always raise (a string,
		# which is itself illegal) before the check could run.
		if ( position < 0 or position != len(realPath) - len(selfPath) or realPath[ : position ]==''):
			raise Exception('Sorry, can\'t map "UserFilesPath" to a physical path. You must set the "UserFilesAbsolutePath" value in "editor/filemanager/connectors/py/config.py".')
		return realPath[ : position ]
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector(	FCKeditorConnectorBase,
				GetFoldersCommandMixin,
				GetFoldersAndFilesCommandMixin,
				CreateFolderCommandMixin,
				UploadFileCommandMixin,
				BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
	"The Standard connector class."
	def doResponse(self):
		"Main function. Process the request, set headers and return a string as response."
		s = ""
		# Check if this connector is disabled
		if not(Config.Enabled):
			return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
		# Make sure we have valid inputs
		for key in ("Command","Type","CurrentFolder"):
			if not self.request.has_key (key):
				# NOTE(review): silently returns None when a required
				# field is missing -- the client receives an empty body.
				return
		# Get command, resource type and current folder
		command = self.request.get("Command")
		resourceType = self.request.get("Type")
		currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
		# Check for invalid paths (getCurrentFolder returns None on traversal
		# attempts or forbidden characters)
		if currentFolder is None:
			if (command == "FileUpload"):
				return self.sendUploadResults( errorNo = 102, customMsg = "" )
			else:
				return self.sendError(102, "")
		# Check if it is an allowed command
		if ( not command in Config.ConfigAllowedCommands ):
			return self.sendError( 1, 'The %s command isn\'t allowed' % command )
		if ( not resourceType in Config.ConfigAllowedTypes ):
			return self.sendError( 1, 'Invalid type specified' )
		# Setup paths: QuickUpload has its own target folders, every other
		# command uses the per-resource-type folders.
		if command == "QuickUpload":
			self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
			self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
		else:
			self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
			self.webUserFilesFolder = Config.FileTypesPath[resourceType]
		if not self.userFilesFolder: # no absolute path given (dangerous...)
			self.userFilesFolder = mapServerPath(self.environ,
								self.webUserFilesFolder)
		# Ensure that the directory exists.
		if not os.path.exists(self.userFilesFolder):
			try:
				self.createServerFolder( self.userFilesFolder )
			except:
				return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
		# File upload doesn't have to return XML, so intercept here
		if (command == "FileUpload"):
			return self.uploadFile(resourceType, currentFolder)
		# Create Url
		url = combinePaths( self.webUserFilesFolder, currentFolder )
		# Begin XML
		s += self.createXmlHeader(command, resourceType, currentFolder, url)
		# Execute the command -- dispatch table mapping the XML command name
		# to the mixin method that implements it.
		selector = {"GetFolders": self.getFolders,
					"GetFoldersAndFiles": self.getFoldersAndFiles,
					"CreateFolder": self.createFolder,
					}
		s += selector[command](resourceType, currentFolder)
		s += self.createXmlFooter()
		return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
	try:
		# Create a Connector Instance
		conn = FCKeditorConnector()
		data = conn.doResponse()
		# Emit headers, a blank separator line, then the body (CGI layout).
		for header in conn.headers:
			print '%s: %s' % header
		print
		print data
	except:
		# Fall back to a plain-text traceback so the CGI response stays
		# well-formed and debuggable.
		print "Content-Type: text/plain"
		print
		import cgi
		cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
	"""
	Zope version of FCKeditorConnector
	"""
	# Allow access (Zope)
	__allow_access_to_unprotected_subobjects__ = 1
	def __init__(self, context=None):
		"""
		Constructor
		"""
		FCKeditorConnector.__init__(self, environ=None) # call superclass constructor
		# Instance Attributes
		self.context = context
		self.request = FCKeditorRequest(context)
	def getZopeRootContext(self):
		# NOTE(review): self.zopeRootContext is read before ever being
		# assigned in this class -- presumably a subclass or caller must
		# initialize it to None first; confirm before relying on this.
		if self.zopeRootContext is None:
			self.zopeRootContext = self.context.getPhysicalRoot()
		return self.zopeRootContext
	def getZopeUploadContext(self):
		# Walk from the Zope root to the configured upload folder, one
		# path component at a time, caching the result.
		# NOTE(review): self.zopeUploadContext is likewise read before
		# being assigned anywhere in this class -- confirm initialization.
		if self.zopeUploadContext is None:
			folderNames = self.userFilesFolder.split("/")
			c = self.getZopeRootContext()
			for folderName in folderNames:
				if (folderName <> ""):
					c = c[folderName]
			self.zopeUploadContext = c
		return self.zopeUploadContext
	def setHeader(self, key, value):
		# Delegate header handling to the Zope response object instead of
		# the base class's headers list.
		self.context.REQUEST.RESPONSE.setHeader(key, value)
	def getFolders(self, resourceType, currentFolder):
		# Open the folders node
		s = ""
		s += """<Folders>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		# List only sub-objects whose Zope meta_type is "Folder".
		for (name, o) in zopeFolder.objectItems(["Folder"]):
			s += """<Folder name="%s" />""" % (
				convertToXmlAttribute(name)
			)
		# Close the folders node
		s += """</Folders>"""
		return s
	def getZopeFoldersAndFiles(self, resourceType, currentFolder):
		# NOTE(review): calls self.getZopeFolders, which is not defined in
		# this class (only getFolders is) -- confirm the intended target.
		folders = self.getZopeFolders(resourceType, currentFolder)
		files = self.getZopeFiles(resourceType, currentFolder)
		s = folders + files
		return s
	def getZopeFiles(self, resourceType, currentFolder):
		# Open the files node
		s = ""
		s += """<Files>"""
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		# List File/Image objects; size is reported in KB, rounded up.
		for (name, o) in zopeFolder.objectItems(["File","Image"]):
			s += """<File name="%s" size="%s" />""" % (
				convertToXmlAttribute(name),
				((o.get_size() / 1024) + 1)
			)
		# Close the files node
		s += """</Files>"""
		return s
	def findZopeFolder(self, resourceType, folderName):
		# returns the context of the resource / folder
		# NOTE(review): removeFromStart/removeFromEnd are module-level
		# helpers in fckutil, not methods -- calling them through self will
		# fail unless another base class provides them; confirm.
		zopeFolder = self.getZopeUploadContext()
		folderName = self.removeFromStart(folderName, "/")
		folderName = self.removeFromEnd(folderName, "/")
		if (resourceType <> ""):
			try:
				zopeFolder = zopeFolder[resourceType]
			except:
				# The per-resource-type folder does not exist yet: create it.
				zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
				zopeFolder = zopeFolder[resourceType]
		if (folderName <> ""):
			folderNames = folderName.split("/")
			for folderName in folderNames:
				zopeFolder = zopeFolder[folderName]
		return zopeFolder
	def createFolder(self, resourceType, currentFolder):
		# Find out where we are
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		errorNo = 0
		errorMsg = ""
		if self.request.has_key("NewFolderName"):
			newFolder = self.request.get("NewFolderName", None)
			zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
		else:
			# Missing folder name in the request.
			errorNo = 102
		return self.sendErrorNode ( errorNo, errorMsg )
	def uploadFile(self, resourceType, currentFolder, count=None):
		# NOTE(review): getFileName/removeExtension/getExtension are
		# module-level fckutil helpers, not methods -- see findZopeFolder.
		zopeFolder = self.findZopeFolder(resourceType, currentFolder)
		file = self.request.get("NewFile", None)
		fileName = self.getFileName(file.filename)
		fileNameOnly = self.removeExtension(fileName)
		fileExtension = self.getExtension(fileName).lower()
		# When retrying after a name clash, insert the retry count into the id.
		if (count):
			nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
		else:
			nid = fileName
		title = nid
		try:
			zopeFolder.manage_addProduct['OFSP'].manage_addFile(
				id=nid,
				title=title,
				file=file.read()
				)
		except:
			# The id already exists (or the add failed): retry with the
			# next count value.
			# NOTE(review): zopeFileUpload is not defined in this class --
			# presumably a recursive call to uploadFile was meant; confirm.
			if (count):
				count += 1
			else:
				count = 1
			return self.zopeFileUpload(resourceType, currentFolder, count)
		return self.sendUploadResults( 0 )
class FCKeditorRequest(object):
	"Adapter exposing a Zope REQUEST through the connector's request interface."
	def __init__(self, context=None):
		# Keep a direct reference to the Zope request object.
		self.request = context.REQUEST
	def has_key(self, key):
		"True when the Zope request carries the given field."
		return self.request.has_key(key)
	def get(self, key, default=None):
		"Fetch a field from the Zope request, falling back to default."
		return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
	import msvcrt
	msvcrt.setmode (0, os.O_BINARY) # stdin  = 0
	msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
	# Not on Windows: msvcrt is unavailable and no action is needed.
	pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin (object):
	def getFolders(self, resourceType, currentFolder):
		"""
		Purpose: command to receive a list of folders
		"""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
		folderNodes = []
		for entryName in os.listdir(serverPath):
			entryPath = mapServerFolder(serverPath, entryName)
			if os.path.isdir(entryPath):
				folderNodes.append('<Folder name="%s" />' % convertToXmlAttribute(entryName))
		# Wrap every subfolder entry in a single <Folders> node.
		return "<Folders>" + "".join(folderNodes) + "</Folders>"
class GetFoldersAndFilesCommandMixin (object):
	def getFoldersAndFiles(self, resourceType, currentFolder):
		"""
		Purpose: command to receive a list of folders and files
		"""
		# Map the virtual path to our local server
		serverPath = mapServerFolder(self.userFilesFolder,currentFolder)
		# Open the folders / files node
		folders = """<Folders>"""
		files = """<Files>"""
		for someObject in os.listdir(serverPath):
			someObjectPath = mapServerFolder(serverPath, someObject)
			if os.path.isdir(someObjectPath):
				folders += """<Folder name="%s" />""" % (
							convertToXmlAttribute(someObject)
							)
			elif os.path.isfile(someObjectPath):
				# Report size in KB, with a minimum of 1 for non-empty files.
				# NOTE(review): size/1024 relies on Python 2 integer floor
				# division; under Python 3 this would yield a float.
				size = os.path.getsize(someObjectPath)
				if size > 0:
					size = round(size/1024)
					if size < 1:
						size = 1
				files += """<File name="%s" size="%d" />""" % (
						convertToXmlAttribute(someObject),
						size
						)
		# Close the folders / files node
		folders += """</Folders>"""
		files += """</Files>"""
		return folders + files
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create all hierachy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath,mode=0755)
os.umask( oldumask )
class UploadFileCommandMixin (object):
	def uploadFile(self, resourceType, currentFolder):
		"""
		Purpose: command to upload files to server (same as FileUpload)
		"""
		errorNo = 0
		if self.request.has_key("NewFile"):
			# newFile has all the contents we need
			newFile = self.request.get("NewFile", "")
			# Get the file name
			newFileName = newFile.filename
			newFileName = sanitizeFileName( newFileName )
			newFileNameOnly = removeExtension(newFileName)
			newFileExtension = getExtension(newFileName).lower()
			allowedExtensions = Config.AllowedExtensions[resourceType]
			deniedExtensions = Config.DeniedExtensions[resourceType]
			# Extension policy: an allow-list wins over a deny-list; with
			# neither configured, every extension is accepted.
			if (allowedExtensions):
				# Check for allowed
				isAllowed = False
				if (newFileExtension in allowedExtensions):
					isAllowed = True
			elif (deniedExtensions):
				# Check for denied
				isAllowed = True
				if (newFileExtension in deniedExtensions):
					isAllowed = False
			else:
				# No extension limitations
				isAllowed = True
			# NOTE(review): when isAllowed is False the method falls
			# through and returns None -- no error page is sent; confirm
			# this is the intended behavior.
			if (isAllowed):
				# Upload to operating system
				# Map the virtual path to the local server path
				currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
				i = 0
				while (True):
					newFilePath = os.path.join (currentFolderPath,newFileName)
					if os.path.exists(newFilePath):
						# Name collision: append "(i)" before the extension
						# and retry until a free name is found.
						i += 1
						newFileName = "%s(%d).%s" % (
								newFileNameOnly, i, newFileExtension
								)
						errorNo= 201 # file renamed
					else:
						# Read file contents and write to the desired path (similar to php's move_uploaded_file)
						fout = file(newFilePath, 'wb')
						while (True):
							chunk = newFile.file.read(100000)
							if not chunk: break
							fout.write (chunk)
						fout.close()
						if os.path.exists ( newFilePath ):
							doChmod = False
							try:
								doChmod = Config.ChmodOnUpload
								permissions = Config.ChmodOnUpload
							except AttributeError: #ChmodOnUpload undefined
								doChmod = True
								permissions = 0755
							if ( doChmod ):
								# Apply the configured permissions with a
								# clean umask, then restore the old umask.
								oldumask = os.umask(0)
								os.chmod( newFilePath, permissions )
								os.umask( oldumask )
							newFileUrl = combinePaths(self.webUserFilesFolder, currentFolder) + newFileName
							return self.sendUploadResults( errorNo , newFileUrl, newFileName )
						else:
							return self.sendUploadResults( errorNo = 202, customMsg = "" )
		else:
			return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
from time import gmtime, strftime
import string
def escape(text, replace=lambda s, old, new: s.replace(old, new)):
	"""
	Converts the special characters '&', '<', '>' and '"' to their HTML
	entities ("&amp;", "&lt;", "&gt;", "&quot;"), as RFC 1866 requires.

	BUG FIX: the replacement strings had been collapsed to the literal
	characters themselves ('&' -> '&' etc.), so the function escaped
	nothing and XML attribute values went out unescaped.
	The replace parameter is kept for interface compatibility (it used to
	default to the Python 2-only string.replace function).
	"""
	text = replace(text, '&', '&amp;') # must be done 1st
	text = replace(text, '<', '&lt;')
	text = replace(text, '>', '&gt;')
	text = replace(text, '"', '&quot;')
	return text
def convertToXmlAttribute(value):
	"Escape value for use inside an XML attribute; None maps to the empty string."
	if value is None:
		return escape("")
	return escape(value)
class BaseHttpMixin(object):
	def setHttpHeaders(self, content_type='text/xml'):
		"Purpose: to prepare the headers for the xml to return"
		# Prevent the browser from caching the result: a date in the past,
		# an always-fresh Last-Modified, plus the HTTP/1.1 and HTTP/1.0
		# cache-control headers.
		noCacheHeaders = (
			('Expires', 'Mon, 26 Jul 1997 05:00:00 GMT'),
			('Last-Modified', strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())),
			('Cache-Control', 'no-store, no-cache, must-revalidate'),
			('Cache-Control', 'post-check=0, pre-check=0'),
			('Pragma', 'no-cache'),
		)
		for headerName, headerValue in noCacheHeaders:
			self.setHeader(headerName, headerValue)
		# Set the response format.
		self.setHeader('Content-Type', content_type + '; charset=utf-8')
		return
class BaseXmlMixin(object):
	def createXmlHeader(self, command, resourceType, currentFolder, url):
		"Purpose: returns the xml header"
		self.setHttpHeaders()
		# XML declaration, the root <Connector> node, and the current
		# folder node, assembled in order.
		parts = ['<?xml version="1.0" encoding="utf-8" ?>']
		parts.append('<Connector command="%s" resourceType="%s">' % (command, resourceType))
		parts.append('<CurrentFolder path="%s" url="%s" />' % (
			convertToXmlAttribute(currentFolder),
			convertToXmlAttribute(url),
		))
		return "".join(parts)
	def createXmlFooter(self):
		"Purpose: returns the xml footer"
		return '</Connector>'
	def sendError(self, number, text):
		"Purpose: in the event of an error, return an xml based error"
		self.setHttpHeaders()
		errorNode = self.sendErrorNode(number, text)
		return '<?xml version="1.0" encoding="utf-8" ?><Connector>%s</Connector>' % errorNode
	def sendErrorNode(self, number, text):
		"Return an <Error> node; the message text is included only for error 1."
		if number != 1:
			return '<Error number="%s" />' % (number)
		return '<Error number="%s" text="%s" />' % (number, convertToXmlAttribute(text))
class BaseHtmlMixin(object):
	def sendUploadResults( self, errorNo = 0, fileUrl = '', fileName = '', customMsg = '' ):
		# Sends the HTML/JS snippet that reports the upload outcome back to
		# the editor frame via window.parent.OnUploadCompleted.
		self.setHttpHeaders("text/html")
		# NOTE(review): the three bare strings below are no-op expression
		# statements, not docstrings (they are not first in the body);
		# kept byte-identical.
		"This is the function that sends the results of the uploading process"
		"Minified version of the document.domain automatic fix script (#1919)."
		"The original script can be found at _dev/domain_fix_template.js"
		return """<script type="text/javascript">
			(function(){var d=document.domain;while (true){try{var A=window.parent.document.domain;break;}catch(e) {};d=d.replace(/.*?(?:\.|$)/,'');if (d.length==0) break;try{document.domain=d;}catch (e){break;}}})();
			window.parent.OnUploadCompleted(%(errorNumber)s,"%(fileUrl)s","%(fileName)s","%(customMsg)s");
		</script>""" % {
		'errorNumber': errorNo,
		'fileUrl': fileUrl.replace ('"', '\\"'),
		'fileName':  fileName.replace ( '"', '\\"' ) ,
		'customMsg':  customMsg.replace ( '"', '\\"' ),
		}
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2010 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using the Apache web server, rename htaccess.txt to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value it you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After file is uploaded, sometimes it is required to change its permissions
# so that it was possible to access it at the later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that does not exist.
ChmodOnFolderCreate = 0755
# Do not touch this 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# Per-resource-type tables (see the comment block above for what each
# dictionary means). The physical ("Absolute") path is only derived from
# UserFilesAbsolutePath when that setting is non-empty; otherwise it is left
# blank and autocalculated at runtime.
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['File'] = UserFilesAbsolutePath + 'file/'
else:
    FileTypesAbsolutePath['File'] = ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']

AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['Image'] = UserFilesAbsolutePath + 'image/'
else:
    FileTypesAbsolutePath['Image'] = ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image'] = FileTypesAbsolutePath['Image']

AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['Flash'] = UserFilesAbsolutePath + 'flash/'
else:
    FileTypesAbsolutePath['Flash'] = ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash'] = FileTypesAbsolutePath['Flash']

AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['Media'] = UserFilesAbsolutePath + 'media/'
else:
    FileTypesAbsolutePath['Media'] = ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media'] = FileTypesAbsolutePath['Media']
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
    """
    Zope version of FCKeditorConnector.

    Overrides the filesystem-based behaviour of the base connector with the
    Zope object database (folders are OFS Folder objects, files OFS File
    objects).
    """
    # Allow access (Zope security machinery)
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, context=None):
        """
        Constructor.

        context -- the Zope context object (provides REQUEST and the
        physical root). TODO confirm against the Zope alias script below.
        """
        FCKeditorConnector.__init__(self, environ=None)  # call superclass constructor
        # Instance attributes
        self.context = context
        self.request = FCKeditorRequest(context)
        # BUG FIX: these caches were never initialized, so the first call to
        # getZopeRootContext()/getZopeUploadContext() raised AttributeError.
        self.zopeRootContext = None
        self.zopeUploadContext = None

    def getZopeRootContext(self):
        """Return (and cache) the Zope physical root object."""
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext

    def getZopeUploadContext(self):
        """Return (and cache) the Zope folder addressed by userFilesFolder."""
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if folderName != "":  # `<>` is Python 2-only; `!=` is equivalent
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext

    def setHeader(self, key, value):
        # Set the header on the real Zope response instead of buffering it.
        self.context.REQUEST.RESPONSE.setHeader(key, value)

    def getFolders(self, resourceType, currentFolder):
        """Return the <Folders> XML fragment for a Zope folder."""
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        s += """</Folders>"""
        return s

    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        # NOTE(review): the base mixin's getFoldersAndFiles() (os.listdir
        # based) is what doResponse() dispatches to; confirm whether this
        # method was meant to override it under that name.
        # BUG FIX: this called the undefined self.getZopeFolders(); the
        # folder-listing method of this class is getFolders().
        folders = self.getFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        s = folders + files
        return s

    def getZopeFiles(self, resourceType, currentFolder):
        """Return the <Files> XML fragment for a Zope folder."""
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File","Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)  # size reported in KB, rounded up
            )
        s += """</Files>"""
        return s

    def findZopeFolder(self, resourceType, folderName):
        """Return the Zope folder object for resourceType/folderName,
        creating the per-type folder on demand."""
        zopeFolder = self.getZopeUploadContext()
        folderName = self.removeFromStart(folderName, "/")
        folderName = self.removeFromEnd(folderName, "/")
        if resourceType != "":
            try:
                zopeFolder = zopeFolder[resourceType]
            except:  # per-type folder missing: create it (kept broad, Zope raises various key errors here)
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if folderName != "":
            folderNames = folderName.split("/")
            for folderName in folderNames:
                zopeFolder = zopeFolder[folderName]
        return zopeFolder

    def createFolder(self, resourceType, currentFolder):
        """Create the folder named by the NewFolderName request parameter."""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            errorNo = 102  # no folder name supplied
        return self.sendErrorNode(errorNo, errorMsg)

    def uploadFile(self, resourceType, currentFolder, count=None):
        """Store the uploaded NewFile in the Zope folder; on an id collision
        retry with "name.count.ext"."""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        fileObj = self.request.get("NewFile", None)  # renamed: `file` shadowed the builtin
        fileName = self.getFileName(fileObj.filename)
        fileNameOnly = self.removeExtension(fileName)
        fileExtension = self.getExtension(fileName).lower()
        if count:
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=fileObj.read()
            )
        except:  # id already taken (or similar): retry with the next suffix
            if count:
                count += 1
            else:
                count = 1
            # BUG FIX: recursed via the undefined name self.zopeFileUpload().
            return self.uploadFile(resourceType, currentFolder, count)
        return self.sendUploadResults(0)
class FCKeditorRequest(object):
    """Thin adapter exposing a Zope REQUEST through a dict-like API."""

    def __init__(self, context=None):
        self.request = context.REQUEST

    def has_key(self, key):
        return self.request.has_key(key)

    def get(self, key, default=None):
        return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Base Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import cgi, os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
import config as Config
class FCKeditorConnectorBase(object):
    """Base connector. Holds the parsed request, the response header list and
    the environ mapping; subclass it to extend functionality (see the Zope
    connector for an example)."""

    def __init__(self, environ=None):
        """Parse the incoming request and start with an empty header list."""
        self.request = FCKeditorRequest(environ)
        self.headers = []
        # Fall back to the process environment for plain CGI runs.
        self.environ = environ or os.environ

    def setHeader(self, key, value):
        """Queue a (key, value) response header."""
        self.headers.append((key, value))
        return
class FCKeditorRequest(object):
    """A wrapper around the request object.

    Merges the POST body fields (cgi.FieldStorage) with the GET query-string
    fields, since cgi only parses one of the two for a POST request.
    """

    def __init__(self, environ):
        # BUG FIX: get_request was only assigned when environ carried both
        # REQUEST_METHOD and QUERY_STRING, so has_key()/get() raised
        # AttributeError otherwise. Always start with an empty mapping.
        self.get_request = {}
        if environ:  # WSGI
            self.request = cgi.FieldStorage(fp=environ['wsgi.input'],
                                            environ=environ,
                                            keep_blank_values=1)
            self.environ = environ
        else:  # plain old CGI
            self.environ = os.environ
            self.request = cgi.FieldStorage()
        if 'REQUEST_METHOD' in self.environ and 'QUERY_STRING' in self.environ:
            if self.environ['REQUEST_METHOD'].upper() == 'POST':
                # We are in a POST, but a GET query string exists too.
                # cgi parses only the POST data by default, so parse the
                # GET QUERY_STRING separately.
                self.get_request = cgi.FieldStorage(fp=None,
                    environ={
                        'REQUEST_METHOD': 'GET',
                        'QUERY_STRING': self.environ['QUERY_STRING'],
                    },
                )

    def has_key(self, key):
        # `in` works for both cgi.FieldStorage and dict; .has_key() no
        # longer exists on Python 3's FieldStorage.
        return (key in self.request) or (key in self.get_request)

    def get(self, key, default=None):
        """Return the field value for key, the FieldStorage item itself for
        file uploads, or default when the key is absent."""
        if key in self.request:
            field = self.request[key]
        elif key in self.get_request:
            field = self.get_request[key]
        else:
            return default
        if hasattr(field, "filename") and field.filename:
            # File upload: do not convert, the caller needs .file/.filename.
            return field
        else:
            return field.value
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
"""
import os
try: # Windows needs stdio set for binary mode for file upload to work.
import msvcrt
msvcrt.setmode (0, os.O_BINARY) # stdin = 0
msvcrt.setmode (1, os.O_BINARY) # stdout = 1
except ImportError:
pass
from fckutil import *
from fckoutput import *
import config as Config
class GetFoldersCommandMixin(object):
    def getFolders(self, resourceType, currentFolder):
        """Build the <Folders> XML fragment listing the sub-folders of
        currentFolder (mapped onto the local filesystem)."""
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        entries = []
        for name in os.listdir(serverPath):
            if os.path.isdir(mapServerFolder(serverPath, name)):
                entries.append('<Folder name="%s" />' % convertToXmlAttribute(name))
        return '<Folders>' + ''.join(entries) + '</Folders>'
class GetFoldersAndFilesCommandMixin(object):
    def getFoldersAndFiles(self, resourceType, currentFolder):
        """Build the <Folders>…</Folders><Files>…</Files> XML fragments for
        the contents of currentFolder (file sizes reported in KB)."""
        serverPath = mapServerFolder(self.userFilesFolder, currentFolder)
        folderItems = []
        fileItems = []
        for entry in os.listdir(serverPath):
            entryPath = mapServerFolder(serverPath, entry)
            if os.path.isdir(entryPath):
                folderItems.append('<Folder name="%s" />' % convertToXmlAttribute(entry))
            elif os.path.isfile(entryPath):
                size = os.path.getsize(entryPath)
                if size > 0:
                    size = round(size/1024)
                    if size < 1:
                        size = 1
                fileItems.append('<File name="%s" size="%d" />' % (
                    convertToXmlAttribute(entry),
                    size
                ))
        return ('<Folders>' + ''.join(folderItems) + '</Folders>'
                + '<Files>' + ''.join(fileItems) + '</Files>')
class CreateFolderCommandMixin (object):
def createFolder(self, resourceType, currentFolder):
"""
Purpose: command to create a new folder
"""
errorNo = 0; errorMsg ='';
if self.request.has_key("NewFolderName"):
newFolder = self.request.get("NewFolderName", None)
newFolder = sanitizeFolderName (newFolder)
try:
newFolderPath = mapServerFolder(self.userFilesFolder, combinePaths(currentFolder, newFolder))
self.createServerFolder(newFolderPath)
except Exception, e:
errorMsg = str(e).decode('iso-8859-1').encode('utf-8') # warning with encodigns!!!
if hasattr(e,'errno'):
if e.errno==17: #file already exists
errorNo=0
elif e.errno==13: # permission denied
errorNo = 103
elif e.errno==36 or e.errno==2 or e.errno==22: # filename too long / no such file / invalid name
errorNo = 102
else:
errorNo = 110
else:
errorNo = 102
return self.sendErrorNode ( errorNo, errorMsg )
def createServerFolder(self, folderPath):
"Purpose: physically creates a folder on the server"
# No need to check if the parent exists, just create all hierachy
try:
permissions = Config.ChmodOnFolderCreate
if not permissions:
os.makedirs(folderPath)
except AttributeError: #ChmodOnFolderCreate undefined
permissions = 0755
if permissions:
oldumask = os.umask(0)
os.makedirs(folderPath,mode=0755)
os.umask( oldumask )
class UploadFileCommandMixin (object):
    def uploadFile(self, resourceType, currentFolder):
        """
        Purpose: command to upload files to server (same as FileUpload)

        Checks the extension against the per-type allow/deny lists and never
        overwrites: an existing name gets a "(n)" suffix, reported to the
        client as error 201 ("file renamed").
        """
        errorNo = 0
        if self.request.has_key("NewFile"):
            # newFile has all the contents we need
            newFile = self.request.get("NewFile", "")
            # Get the file name
            newFileName = newFile.filename
            newFileName = sanitizeFileName( newFileName )
            newFileNameOnly = removeExtension(newFileName)
            newFileExtension = getExtension(newFileName).lower()
            allowedExtensions = Config.AllowedExtensions[resourceType]
            deniedExtensions = Config.DeniedExtensions[resourceType]
            if (allowedExtensions):
                # Check for allowed (the allow list wins over the deny list)
                isAllowed = False
                if (newFileExtension in allowedExtensions):
                    isAllowed = True
            elif (deniedExtensions):
                # Check for denied
                isAllowed = True
                if (newFileExtension in deniedExtensions):
                    isAllowed = False
            else:
                # No extension limitations
                isAllowed = True
            if (isAllowed):
                # Upload to operating system
                # Map the virtual path to the local server path
                currentFolderPath = mapServerFolder(self.userFilesFolder, currentFolder)
                i = 0
                while (True):
                    newFilePath = os.path.join (currentFolderPath,newFileName)
                    if os.path.exists(newFilePath):
                        # Name taken: try "name(i).ext" on the next pass.
                        i += 1
                        newFileName = "%s(%d).%s" % (
                            newFileNameOnly, i, newFileExtension
                        )
                        errorNo= 201 # file renamed
                    else:
                        # Read file contents and write to the desired path (similar to php's move_uploaded_file)
                        # NOTE: file() is the Python 2 spelling of open().
                        fout = file(newFilePath, 'wb')
                        while (True):
                            # Copy in 100 KB chunks to bound memory use.
                            chunk = newFile.file.read(100000)
                            if not chunk: break
                            fout.write (chunk)
                        fout.close()
                        if os.path.exists ( newFilePath ):
                            doChmod = False
                            try:
                                doChmod = Config.ChmodOnUpload
                                permissions = Config.ChmodOnUpload
                            except AttributeError: #ChmodOnUpload undefined
                                doChmod = True
                                permissions = 0755
                            if ( doChmod ):
                                # Clear the umask so the configured mode is
                                # applied exactly, then restore it.
                                oldumask = os.umask(0)
                                os.chmod( newFilePath, permissions )
                                os.umask( oldumask )
                        newFileUrl = combinePaths(self.webUserFilesFolder, currentFolder) + newFileName
                        return self.sendUploadResults( errorNo , newFileUrl, newFileName )
            else:
                # Extension not allowed for this resource type.
                return self.sendUploadResults( errorNo = 202, customMsg = "" )
        else:
            return self.sendUploadResults( errorNo = 202, customMsg = "No File" )
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector/QuickUpload for Python (WSGI wrapper).
See config.py for configuration settings
"""
from connector import FCKeditorConnector
from upload import FCKeditorQuickUpload
import cgitb
from cStringIO import StringIO
# Running from WSGI capable server (recomended)
def App(environ, start_response):
    """WSGI entry point. Dispatch to the connector or the quick uploader
    based on SCRIPT_NAME and stream the response body."""
    if environ['SCRIPT_NAME'].endswith("connector.py"):
        conn = FCKeditorConnector(environ)
    elif environ['SCRIPT_NAME'].endswith("upload.py"):
        conn = FCKeditorQuickUpload(environ)
    else:
        start_response ("200 Ok", [('Content-Type','text/html')])
        yield "Unknown page requested: "
        yield environ['SCRIPT_NAME']
        return
    try:
        # Run the connector
        data = conn.doResponse()
        # Start WSGI response:
        start_response ("200 Ok", conn.headers)
        # Send response text
        yield data
    except:
        import sys
        # BUG FIX: WSGI (PEP 333) requires exc_info to be passed when
        # start_response is called again after a failure, otherwise the
        # server must reject the second call once headers have been sent.
        start_response("500 Internal Server Error",
                       [("Content-type","text/html")],
                       sys.exc_info())
        buf = StringIO()  # renamed: `file` shadowed the builtin
        cgitb.Hook(file = buf).handle()
        yield buf.getvalue()
| Python |
#!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python (CGI and WSGI).
See config.py for configuration settings
"""
import os
from fckutil import *
from fckcommands import * # default command's implementation
from fckoutput import * # base http, xml and html output mixins
from fckconnector import FCKeditorConnectorBase # import base connector
import config as Config
class FCKeditorConnector( FCKeditorConnectorBase,
                          GetFoldersCommandMixin,
                          GetFoldersAndFilesCommandMixin,
                          CreateFolderCommandMixin,
                          UploadFileCommandMixin,
                          BaseHttpMixin, BaseXmlMixin, BaseHtmlMixin ):
    "The Standard connector class."

    def doResponse(self):
        "Main function. Process the request, set headers and return a string as response."
        s = ""
        # Check if this connector is disabled
        if not(Config.Enabled):
            return self.sendError(1, "This connector is disabled. Please check the connector configurations in \"editor/filemanager/connectors/py/config.py\" and try again.")
        # Make sure we have valid inputs
        for key in ("Command","Type","CurrentFolder"):
            if not self.request.has_key (key):
                # NOTE(review): silently returns None when a required
                # parameter is missing -- the client receives an empty body.
                return
        # Get command, resource type and current folder
        command = self.request.get("Command")
        resourceType = self.request.get("Type")
        currentFolder = getCurrentFolder(self.request.get("CurrentFolder"))
        # Check for invalid paths (getCurrentFolder returns None for them)
        if currentFolder is None:
            if (command == "FileUpload"):
                return self.sendUploadResults( errorNo = 102, customMsg = "" )
            else:
                return self.sendError(102, "")
        # Check if it is an allowed command
        if ( not command in Config.ConfigAllowedCommands ):
            return self.sendError( 1, 'The %s command isn\'t allowed' % command )
        if ( not resourceType in Config.ConfigAllowedTypes ):
            return self.sendError( 1, 'Invalid type specified' )
        # Setup paths: QuickUpload uses its own per-type directories.
        if command == "QuickUpload":
            self.userFilesFolder = Config.QuickUploadAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.QuickUploadPath[resourceType]
        else:
            self.userFilesFolder = Config.FileTypesAbsolutePath[resourceType]
            self.webUserFilesFolder = Config.FileTypesPath[resourceType]
        if not self.userFilesFolder: # no absolute path given (dangerous...)
            self.userFilesFolder = mapServerPath(self.environ,
                                  self.webUserFilesFolder)
        # Ensure that the directory exists.
        if not os.path.exists(self.userFilesFolder):
            try:
                self.createServerFolder( self.userFilesFolder )
            except:
                return self.sendError(1, "This connector couldn\'t access to local user\'s files directories. Please check the UserFilesAbsolutePath in \"editor/filemanager/connectors/py/config.py\" and try again. ")
        # File upload doesn't have to return XML, so intercept here
        if (command == "FileUpload"):
            return self.uploadFile(resourceType, currentFolder)
        # Create Url
        url = combinePaths( self.webUserFilesFolder, currentFolder )
        # Begin XML
        s += self.createXmlHeader(command, resourceType, currentFolder, url)
        # Execute the command via the mixin that implements it.
        selector = {"GetFolders": self.getFolders,
                    "GetFoldersAndFiles": self.getFoldersAndFiles,
                    "CreateFolder": self.createFolder,
                    }
        s += selector[command](resourceType, currentFolder)
        s += self.createXmlFooter()
        return s
# Running from command line (plain old CGI)
if __name__ == '__main__':
    try:
        # Create a Connector Instance (no environ: plain old CGI mode)
        conn = FCKeditorConnector()
        data = conn.doResponse()
        # Emit the CGI headers, a blank line, then the body.
        # (Python 2 print statements -- this module targets Python 2.x.)
        for header in conn.headers:
            print '%s: %s' % header
        print
        print data
    except:
        # On any failure send a plain-text traceback instead of half a page.
        print "Content-Type: text/plain"
        print
        import cgi
        cgi.print_exception()
| Python |
#!/usr/bin/env python
"""
* FCKeditor - The text editor for Internet - http://www.fckeditor.net
* Copyright (C) 2003-2010 Frederico Caldeira Knabben
*
* == BEGIN LICENSE ==
*
* Licensed under the terms of any of the following licenses at your
* choice:
*
* - GNU General Public License Version 2 or later (the "GPL")
* http://www.gnu.org/licenses/gpl.html
*
* - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
* http://www.gnu.org/licenses/lgpl.html
*
* - Mozilla Public License Version 1.1 or later (the "MPL")
* http://www.mozilla.org/MPL/MPL-1.1.html
*
* == END LICENSE ==
*
* Configuration file for the File Manager Connector for Python
"""
# INSTALLATION NOTE: You must set up your server environment accordingly to run
# python scripts. This connector requires Python 2.4 or greater.
#
# Supported operation modes:
# * WSGI (recommended): You'll need apache + mod_python + modpython_gateway
# or any web server capable of the WSGI python standard
# * Plain Old CGI: Any server capable of running standard python scripts
# (although mod_python is recommended for performance)
# This was the previous connector version operation mode
#
# If you're using Apache web server, replace the htaccess.txt to to .htaccess,
# and set the proper options and paths.
# For WSGI and mod_python, you may need to download modpython_gateway from:
# http://projects.amor.org/misc/svn/modpython_gateway.py and copy it in this
# directory.
# SECURITY: You must explicitly enable this "connector". (Set it to "True").
# WARNING: don't just set "ConfigIsEnabled = True", you must be sure that only
# authenticated users can access this file or use some kind of session checking.
# Master switch: the connector rejects every request while this is False.
Enabled = False
# Path to user files relative to the document root.
UserFilesPath = '/userfiles/'
# Fill the following value if you prefer to specify the absolute path for the
# user files directory. Useful if you are using a virtual directory, symbolic
# link or alias. Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'UserFilesPath' must point to the same directory.
# WARNING: GetRootPath may not work in virtual or mod_python configurations, and
# may not be thread safe. Use this configuration parameter instead.
UserFilesAbsolutePath = ''
# Due to security issues with Apache modules, it is recommended to leave the
# following setting enabled.
ForceSingleExtension = True
# What the user can do with this connector
ConfigAllowedCommands = [ 'QuickUpload', 'FileUpload', 'GetFolders', 'GetFoldersAndFiles', 'CreateFolder' ]
# Allowed Resource Types
ConfigAllowedTypes = ['File', 'Image', 'Flash', 'Media']
# After file is uploaded, sometimes it is required to change its permissions
# so that it is possible to access it at a later time.
# If possible, it is recommended to set more restrictive permissions, like 0755.
# Set to 0 to disable this feature.
# Note: not needed on Windows-based servers.
# (0755 is a Python 2 octal literal: rwxr-xr-x.)
ChmodOnUpload = 0755
# See comments above.
# Used when creating folders that do not exist.
ChmodOnFolderCreate = 0755
# Do not touch these 3 lines, see "Configuration settings for each Resource Type"
AllowedExtensions = {}; DeniedExtensions = {};
FileTypesPath = {}; FileTypesAbsolutePath = {};
QuickUploadPath = {}; QuickUploadAbsolutePath = {};
# Configuration settings for each Resource Type
#
# - AllowedExtensions: the possible extensions that can be allowed.
# If it is empty then any file type can be uploaded.
# - DeniedExtensions: The extensions that won't be allowed.
# If it is empty then no restrictions are done here.
#
# For a file to be uploaded it has to fulfill both the AllowedExtensions
# and DeniedExtensions (that's it: not being denied) conditions.
#
# - FileTypesPath: the virtual folder relative to the document root where
# these resources will be located.
# Attention: It must start and end with a slash: '/'
#
# - FileTypesAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'FileTypesPath' must point to the same directory.
# Attention: It must end with a slash: '/'
#
#
# - QuickUploadPath: the virtual folder relative to the document root where
# these resources will be uploaded using the Upload tab in the resources
# dialogs.
# Attention: It must start and end with a slash: '/'
#
# - QuickUploadAbsolutePath: the physical path to the above folder. It must be
# an absolute path.
# If it's an empty string then it will be autocalculated.
# Useful if you are using a virtual directory, symbolic link or alias.
# Examples: 'C:\\MySite\\userfiles\\' or '/root/mysite/userfiles/'.
# Attention: The above 'QuickUploadPath' must point to the same directory.
# Attention: It must end with a slash: '/'
# Per-resource-type tables (see the comment block above for what each
# dictionary means). The physical ("Absolute") path is only derived from
# UserFilesAbsolutePath when that setting is non-empty; otherwise it is left
# blank and autocalculated at runtime.
AllowedExtensions['File'] = ['7z','aiff','asf','avi','bmp','csv','doc','fla','flv','gif','gz','gzip','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','ods','odt','pdf','png','ppt','pxd','qt','ram','rar','rm','rmi','rmvb','rtf','sdc','sitd','swf','sxc','sxw','tar','tgz','tif','tiff','txt','vsd','wav','wma','wmv','xls','xml','zip']
DeniedExtensions['File'] = []
FileTypesPath['File'] = UserFilesPath + 'file/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['File'] = UserFilesAbsolutePath + 'file/'
else:
    FileTypesAbsolutePath['File'] = ''
QuickUploadPath['File'] = FileTypesPath['File']
QuickUploadAbsolutePath['File'] = FileTypesAbsolutePath['File']

AllowedExtensions['Image'] = ['bmp','gif','jpeg','jpg','png']
DeniedExtensions['Image'] = []
FileTypesPath['Image'] = UserFilesPath + 'image/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['Image'] = UserFilesAbsolutePath + 'image/'
else:
    FileTypesAbsolutePath['Image'] = ''
QuickUploadPath['Image'] = FileTypesPath['Image']
QuickUploadAbsolutePath['Image'] = FileTypesAbsolutePath['Image']

AllowedExtensions['Flash'] = ['swf','flv']
DeniedExtensions['Flash'] = []
FileTypesPath['Flash'] = UserFilesPath + 'flash/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['Flash'] = UserFilesAbsolutePath + 'flash/'
else:
    FileTypesAbsolutePath['Flash'] = ''
QuickUploadPath['Flash'] = FileTypesPath['Flash']
QuickUploadAbsolutePath['Flash'] = FileTypesAbsolutePath['Flash']

AllowedExtensions['Media'] = ['aiff','asf','avi','bmp','fla', 'flv','gif','jpeg','jpg','mid','mov','mp3','mp4','mpc','mpeg','mpg','png','qt','ram','rm','rmi','rmvb','swf','tif','tiff','wav','wma','wmv']
DeniedExtensions['Media'] = []
FileTypesPath['Media'] = UserFilesPath + 'media/'
if UserFilesAbsolutePath != '':
    FileTypesAbsolutePath['Media'] = UserFilesAbsolutePath + 'media/'
else:
    FileTypesAbsolutePath['Media'] = ''
QuickUploadPath['Media'] = FileTypesPath['Media']
QuickUploadAbsolutePath['Media'] = FileTypesAbsolutePath['Media']
| Python |
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2010 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
This is the integration file for Python.
"""
import cgi
import os
import re
import string
def escape(text, replace=None):
    """Convert the special characters '&', '<', '>', '"' and "'" to their
    HTML character entities, per RFC 1866.

    BUG FIX: the replacement strings were the characters themselves, so the
    function returned its input unchanged. The default also relied on
    string.replace, which has been removed from the standard library; the
    optional *replace* argument is kept for backward compatibility and must
    be a callable replace(text, old, new).
    """
    if replace is None:
        replace = lambda s, old, new: s.replace(old, new)
    text = replace(text, '&', '&amp;')  # must be done first
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text
# The FCKeditor class
class FCKeditor(object):
    """Server-side integration class: renders the FCKeditor iframe for
    compatible browsers, or a plain <textarea> fallback otherwise."""

    def __init__(self, instanceName):
        self.InstanceName = instanceName  # base name for the form field / DOM ids
        self.BasePath = '/fckeditor/'     # where the editor files are served from
        self.Width = '100%'
        self.Height = '200'
        self.ToolbarSet = 'Default'
        self.Value = ''
        self.Config = {}                  # extra settings serialized into a hidden field

    def Create(self):
        """Return the HTML for this editor instance."""
        return self.CreateHtml()

    def CreateHtml(self):
        """Build the editor HTML: hidden fields plus iframe for compatible
        browsers, a <textarea> otherwise."""
        HtmlValue = escape(self.Value)
        Html = ""
        if self.IsCompatible():
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if self.ToolbarSet is not None:
                Link += "&Toolbar=%s" % self.ToolbarSet
            # Render the linked hidden field
            Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.InstanceName,
                HtmlValue
            )
            # Render the configurations hidden field
            Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.GetConfigFieldString()
            )
            # Render the editor iframe.
            # BUG FIX: the id used "\__Frame", putting a literal backslash in
            # the DOM id; the editor script looks up "<name>___Frame" (same
            #三-underscore scheme as the ___Config field above).
            Html += "<iframe id=\"%s___Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
                self.InstanceName,
                Link,
                self.Width,
                self.Height
            )
        else:
            # BUG FIX: these checks used find("%%"); in a normal (non-format)
            # string that is two literal percent characters, so a width of
            # "100%" never matched and was wrongly suffixed with "px".
            if self.Width.find("%") < 0:
                WidthCSS = "%spx" % self.Width
            else:
                WidthCSS = self.Width
            if self.Height.find("%") < 0:
                HeightCSS = "%spx" % self.Height
            else:
                HeightCSS = self.Height
            Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
                self.InstanceName,
                WidthCSS,
                HeightCSS,
                HtmlValue
            )
        return Html

    def IsCompatible(self):
        """Best-effort User-Agent sniffing: MSIE 5.5+ (not Mac/Opera),
        Gecko builds from 2003-02-10 on, Opera 9.5+, or WebKit 522+.
        Unknown agents get the plain-textarea fallback."""
        sAgent = os.environ.get("HTTP_USER_AGENT", "")  # replaces Py2-only os.environ.has_key()
        if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i+5:i+5+3])
            return iVersion >= 5.5
        elif sAgent.find("Gecko/") >= 0:
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i+6:i+6+8])
            return iVersion >= 20030210
        elif sAgent.find("Opera/") >= 0:
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i+6:i+6+4])
            return iVersion >= 9.5
        elif sAgent.find("AppleWebKit/") >= 0:
            p = re.compile(r'AppleWebKit\/(\d+)', re.IGNORECASE)
            m = p.search(sAgent)
            # BUG FIX: m.group(1) is a string; comparing it to the int 522
            # accepted every WebKit build on Python 2 (str > int always) and
            # raises TypeError on Python 3. Also guard against a non-match.
            return m is not None and int(m.group(1)) >= 522
        else:
            return False

    def GetConfigFieldString(self):
        """Serialize self.Config as escaped key=value pairs joined with '&'.
        NOTE(review): other FCKeditor integrations separate pairs with
        '&amp;' -- confirm which form the editor script expects."""
        sParams = ""
        bFirst = True
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if not bFirst:
                sParams += "&"
            else:
                bFirst = False
            if sValue:
                k = escape(sKey)
                v = escape(sValue)
                if sValue == "true":
                    sParams += "%s=true" % k
                elif sValue == "false":
                    sParams += "%s=false" % k
                else:
                    sParams += "%s=%s" % (k, v)
        return sParams
| Python |
#-------------------------------------------------------------------------------
# Name: analyze_graph
# Purpose:
#
# Author: Azfar Khandoker
#
# Created: 10/04/2013
# Copyright: (c) Azfar Khandoker 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
from igraph import *

# Prompt for *just* the file name, not a path (for example:
# "apple.orange.gml").  Graphs live in the graphs/ directory one level
# above this working directory.
filename = raw_input("filename: ")
graph_path = "../graphs/" + filename
print("reading " + graph_path)
graph = Graph.Read_GML(graph_path)
summary(graph)
print("\naverage path length = " + str(graph.average_path_length()))
# The file name encodes "<seed word>.<target word>.gml".
name_parts = filename.split('.')
seed_word = name_parts[0]
target_word = name_parts[1]
print("seed word = \"" + seed_word + "\"")
print("target word = \"" + target_word + "\"")
seed_vertex = graph.vs.select(label_eq=seed_word)[0]
target_vertex = graph.vs.select(label_eq=target_word)[0]
# Walk (and print) one shortest path between the two words.
shortest = graph.get_all_shortest_paths(seed_vertex, to=target_vertex)[0]
for vertex_index in shortest:
    print("%d: %s" % (vertex_index, graph.vs[vertex_index]["label"]))
print("Length: %d" % len(shortest))
#plot(graph, "output.png", margin = 50)
| Python |
#-------------------------------------------------------------------------------
# Name: amazon_generator
# Purpose:
#
# Author: Azfar Khandoker
#
# Created: 17/04/2013
# Copyright: (c) Azfar Khandoker 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
from igraph import *
import re

# Seed search terms; one subgraph is generated around every product
# whose title contains the seed as a whole word.
seeds = [
    'las vegas',
    'the beatles',
    'oreo',
    'zebra',
    'sony'
]
# Maximum number of hops used for the neighborhood subgraphs.
k = 4
# Read the GML data into memory so we do not need to keep the GML file open.
f = open('amazon.gml', 'r')
g = Graph.Read_GML(f)
f.close()
for seed in seeds:
    count = 0
    # Match the seed only as a whole word: a letter immediately before or
    # after it disqualifies the match, so with a seed of 'car' neither
    # 'card' nor 'scar' counts.  A raw string avoids relying on '\A'
    # surviving as an unrecognized escape, and re.escape keeps seeds
    # containing regex metacharacters from breaking the pattern.
    regex = re.compile(r'.*([^a-z]|\A)' + re.escape(seed) + r'([^a-z]|$).*')
    for v in g.vs:
        # Compare case-insensitively against the vertex label.
        if regex.match(v['label'].lower()) is not None:
            # All vertices at most k hops away from the matched vertex.
            neighborhood = g.neighborhood(int(v['id']), k)
            subgraph = g.subgraph(neighborhood)
            # Many vertices match, but only neighborhoods whose induced
            # subgraph has diameter at least k are interesting; `count`
            # keeps the output file names unique per seed.
            if subgraph.diameter() >= k:
                out = open('output/' + seed + '_' + str(count) + '.gml', 'w')
                subgraph.write_gml(out)
                out.close()
                count += 1
    if count == 0:
        # BUG FIX: a space was missing between the seed and the message.
        print(seed + ' not found in graph')
| Python |
# Original Amazon data from
# http://snap.stanford.edu/data/bigdata/amazon/amazon-meta.txt.gz
meta = open("amazon-meta.txt", "r")
# Sequential integer id assigned to each ASIN.
count = 0
# Maps an ASIN to [numeric id, title, list of similar ASINs].
d = dict()
# Keeps the ASINs in the order they were first encountered.
l = []
for line in meta:
    if line.startswith("ASIN"):
        # Remember the ASIN; the title should follow on a later line.
        ASIN = line[6:-1]
        continue
    elif line.startswith("  title"):
        # Remember the title; the similar-products line should follow.
        title = line[9:-1]
        continue
    elif line.startswith("  similar"):
        # By the time "similar" appears we have seen the matching ASIN
        # and title, so fall through and record the complete entry.
        similar = line[12:-1].split(' ')[1:]
    else:
        # Any other line is irrelevant.
        continue
    l.append(ASIN)
    d[ASIN] = [count, title, similar]
    count += 1
meta.close()
f1 = open("edge-list.txt", "w")
f2 = open("vertex-labels.txt", "w")
for asin in l:
    asin_id = str(d[asin][0])
    # Emit the edge list in igraph-friendly numeric form; only keep edges
    # whose target is itself part of this data set.
    for other in d[asin][2]:
        if other in d:
            f1.write(asin_id + ' ' + str(d[other][0]) + '\n')
    # Associate every vertex id with its title ("label" attribute).
    f2.write(asin_id + ' ' + str(d[asin][1]) + '\n')
f1.close()
f2.close()
| Python |
#!/bin/usr/python
from igraph import *

# Prompt for a GML graph file, print its summary, then dump the degree
# distribution to "<file>_deg_dist.csv" as "<degree>,<probability>" rows.
print("Filename: ")
file = raw_input()
g = Graph.Read_GML(file)
print(g.summary())
total = g.degree_distribution().n
csv_out = open(file + "_deg_dist.csv", 'w')
for left, right, count in g.degree_distribution().bins():
    csv_out.write("%d,%f\n" % (left, float(count) / total))
csv_out.close()
| Python |
from igraph import *

# Compare a Google Sets graph against an Amazon graph: count how many
# Google Sets labels also appear in the Amazon graph and how many are
# exclusive to each side.
num_gs_only = 0  # gs = Google Sets
num_shared = 0
num_not_gs = 0
gs_name = raw_input("Google sets graph: ")
gs_graph = Graph.Read_GML(gs_name)
amazon_name = raw_input("Amazon graph: ")
amazon_graph = Graph.Read_GML(amazon_name)
for vertex in gs_graph.vs:
    # Amazon labels are Title_Case with underscores instead of spaces.
    query = vertex["label"].title().replace(" ", "_")
    matches = amazon_graph.vs.select(label=query)
    if len(matches) == 0:
        num_gs_only += 1
    else:
        num_shared += 1
num_not_gs = len(amazon_graph.vs) - num_shared
print("Num gs only: " + str(num_gs_only))
print("Num shared: " + str(num_shared))
print("Num not gs: " + str(num_not_gs))
shared_fraction = float(num_shared) / float(len(amazon_graph.vs))
print("Fraction shared: " + str(shared_fraction))
| Python |
#-------------------------------------------------------------------------------
# Name: wiki_generator
# Purpose: Generates subgraphs with k = 4 for the given seed words
#
# Author: Ryan Miller
#
# Created: 17/04/2013
# Copyright: (c) Not Azfar Khandoker 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
from igraph import *
import re

# Seed article titles; one k-hop subgraph is written per seed.
seeds = [
    'Las_Vegas',
    'The_Beatles',
    'Oreo',
    'Zebra',
    'Sony'
]
k = 2
print("Reading edge list")
# Read the edge-list data into memory.
g = Graph.Read_Edgelist("/homes/millerrv/scratch/links_el_final.txt")
print("Done")
print("Combining data with titles")
# Attach article titles (one per line, in vertex-id order).
title_file = open("/homes/millerrv/scratch/titles-sorted.txt", 'r')
titles = [line.rstrip() for line in title_file]
g.vs["title"] = titles
print("Done")
for seed in seeds:
    print("Making subgraph for seed: " + seed)
    count = 0
    vertex = g.vs.select(title=seed)[0]
    # Vertices reachable within k outgoing hops of the seed article.
    neighborhood = g.neighborhood(vertex, k, "out")
    subgraph = g.subgraph(neighborhood)
    out_file = open("/homes/millerrv/scratch/" + seed + "_" + str(count) + ".graphml",
                    'w')
    subgraph.write_graphml(out_file)
    out_file.close()
from igraph import *
import matplotlib.pyplot as plt

# Plot a graph's degree distribution on log-log axes and save it.
plt.figure()
# BUG FIX: keep the file name so it can appear in the plot title; the
# title previously referenced an undefined variable `name`, raising
# NameError at runtime.
fname = raw_input()
g = Graph.Read_GML(fname)
dd = g.degree_distribution()
# (degree, probability) pairs from the distribution bins.
xs, ys = zip(*[(left, count / float(dd.n)) for left, _, count in dd.bins()])
plt.xscale('log')
plt.yscale('log')
plt.title(r"$\mathrm{" + fname + "\ Degree\ Distribution}$")
plt.xlabel(r"$\mathrm{Degree}$")
plt.ylabel(r"$\mathrm{Probability}$")
plt.plot(xs, ys)
plt.savefig("plot.png")
| Python |
def cosine_similarity(g, i, j):
    """Cosine similarity between vertices i and j of graph g.

    Counts common neighbors via one row/column product of the adjacency
    matrix and normalizes by the geometric mean of the two degrees.
    Returns 0.0 when either vertex is isolated (degree 0) instead of
    raising ZeroDivisionError.
    """
    # BUG FIX: `math` was never imported in this script; import it here
    # so the function is self-contained.
    import math

    adj = g.get_adjacency()
    common = 0.0
    for k in range(adj.shape[0]):
        common += adj[i, k] * adj[k, j]
    denom = math.sqrt(g.vs[i].degree() * g.vs[j].degree())
    if denom == 0:
        return 0.0
    return common / denom
# warning: takes a long time
def all_cosine_similarity(g):
    """Return [i, j, similarity] triples for every ordered vertex pair."""
    n = len(g.vs)
    return [[i, j, cosine_similarity(g, i, j)]
            for i in range(n)
            for j in range(n)]
| Python |
#-------------------------------------------------------------------------------
# Name: edge_list_to_gml
# Purpose:
#
# Author: Azfar Khandoker
#
# Created: 17/04/2013
# Copyright: (c) Azfar Khandoker 2013
# Licence: <your licence>
#-------------------------------------------------------------------------------
from igraph import *

# Convert the edge-list plus vertex-label files into one GML graph.
# Explicit file handles are used throughout: letting igraph open the
# files itself means they get closed/flushed whenever it pleases, which
# breaks other scripts in the pipeline.
edge_file = open('edge-list.txt', 'r')
g = Graph.Read_Edgelist(edge_file)
edge_file.close()
label_file = open("vertex-labels.txt", 'r')
# Strip any double quotes that appear in the vertex labels; double
# quotes mess up the GML output.
labels = [line[:-1].split(' ', 1)[1].replace('"', '') for line in label_file]
label_file.close()
g.vs["label"] = labels
# Write the labeled graph out in GML format, again with an explicit handle.
gml_file = open('amazon.gml', 'w')
g.write_gml(gml_file)
gml_file.close()
#!/bin/usr/python
from igraph import *
# NOTE(review): this script is a byte-for-byte duplicate of the degree
# distribution dumper that appears earlier in this collection.
# Prompt for a GML graph file, print its summary, then dump its degree
# distribution as "<degree>,<probability>" CSV rows.
print "Filename: "
file = raw_input()
g = Graph.Read_GML(file)
print g.summary()
# Total number of observations in the degree distribution.
n = g.degree_distribution().n
f = open(file + "_deg_dist.csv", 'w')
for left, right, count in g.degree_distribution().bins():
    print >>f, "%d,%f" % (left, float(float(count)/n))
f.close()
| Python |
#!/usr/bin/python
import sys

# Parse Unicode EastAsianWidth data from stdin and collect the code
# points whose width class is Ambiguous (A), Wide (W) or Fullwidth (F).
# F aliases W's list, so W ends up holding both W and F code points.
W = {}
W['A'] = []
W['W'] = []
W['F'] = W['W']
for line in sys.stdin:
    # Drop trailing comments and surrounding whitespace; skip blanks.
    comment_pos = line.find('#')
    if comment_pos >= 0:
        line = line[:comment_pos]
    line = line.strip()
    if not line:
        continue
    fields = [field.strip() for field in line.split(';')]
    chars = fields[0]
    width = fields[1]
    if width not in ['A', 'W', 'F']:
        continue
    # The first field is a single code point or a "start..end" range.
    if chars.find('..') > 0:
        (start, end) = chars.split('..')
    else:
        start = chars
        end = chars
    start, end = int(start, 16), int(end, 16)
    for codepoint in range(start, end + 1):
        W[width].append(codepoint)
def write_intervals (S):
    """Print the code points in S (sorted in place) as C initializer rows
    of inclusive {start, end} intervals."""
    S.sort ()
    lo = S[0]
    hi = lo - 1
    for codepoint in S:
        if codepoint == hi + 1:
            # Still contiguous: extend the current interval.
            hi += 1
        else:
            # Gap found: emit the finished interval, start a new one.
            print("{0x%04X, 0x%04X}, " % (lo, hi))
            lo = codepoint
            hi = codepoint
    print("{0x%04X, 0x%04X} " % (lo, hi))
# Emit the two interval tables consumed by glib's character-width checks.
print("table for g_unichar_iswide():")
print("")
write_intervals (W['W'])
print("")
print("table for g_unichar_iswide_cjk():")
print("")
write_intervals (W['A'])
| Python |
#! /usr/bin/env python
# GLib Testing Framework Utility -*- Mode: python; -*-
# Copyright (C) 2007 Imendio AB
# Authors: Tim Janik
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import datetime
import optparse
import sys, re, xml.dom.minidom
# subunit output support is optional: it requires python-subunit and
# testtools.  `subunit is None` is used later to detect availability.
try:
    import subunit
    from subunit import iso8601
    from testtools.content import Content, ContentType
    mime_utf8 = ContentType('text', 'plain', {'charset': 'utf8'})
except ImportError:
    subunit = None
# Build-time configuration values, substituted upon script installation.
pkginstall_configvars = {
    #@PKGINSTALL_CONFIGVARS_IN24LINES@ # configvars are substituted upon script installation
}
# xml utilities
def find_child (node, child_name):
    """Return the first direct child of `node` named `child_name`, or None."""
    for candidate in node.childNodes:
        if candidate.nodeName == child_name:
            return candidate
    return None
def list_children (node, child_name):
    """Return all direct children of `node` named `child_name` (may be empty)."""
    return [child for child in node.childNodes
            if child.nodeName == child_name]
def find_node (node, name = None):
    """Depth-first search for the first node named `name`.

    Returns `node` itself when it matches, when `name` is None, or when
    `node` is falsy; returns None when nothing in the subtree matches.
    """
    if not node or not name or node.nodeName == name:
        return node
    for child in node.childNodes:
        found = find_node (child, name)
        if found:
            return found
    return None
def node_as_text (node, name = None):
    """Concatenate all text beneath `node` (or beneath its descendant
    named `name` when given); '' when nothing is found."""
    if name:
        node = find_node (node, name)
    if not node:
        return ''
    parts = [node.nodeValue or '']
    for child in node.childNodes:
        parts.append (node_as_text (child))
    return ''.join (parts)
def attribute_as_text (node, aname, node_name = None):
    """Value of attribute `aname` on `node` (or on its descendant named
    `node_name`); '' when the node or the attribute is missing."""
    node = find_node (node, node_name)
    if not node:
        return ''
    attr = node.attributes.get (aname, '')
    # Missing attributes come back as the '' default, which has no .value.
    return attr.value if hasattr (attr, 'value') else ''
# HTML utilities
def html_indent_string (n):
    """Return an HTML string rendering roughly n/2 levels of indentation.

    Alternating '&nbsp;' and ' ' survives HTML whitespace collapsing;
    the entity reference had been lost to plain spaces and is restored.
    """
    uncollapsible_space = '&nbsp; ' # HTML won't compress alternating sequences of '&nbsp;' and ' '
    # Floor division keeps this working on Python 3 as well, where `/` on
    # two ints yields a float that range() would reject.
    return uncollapsible_space * ((n + 1) // 2)
# TestBinary object, instantiated per test binary in the log file
class TestBinary:
    """Accumulated statistics for one test binary found in the log."""
    def __init__ (self, name):
        self.name = name        # binary path as recorded in the log
        self.testcases = []     # collected <testcase> DOM nodes
        self.duration = 0       # summed runtime in seconds
        self.success_cases = 0  # number of cases with result 'success'
        self.skipped_cases = 0  # number of cases flagged skipped
        self.file = '???'       # on-disk file name, when recorded
        self.random_seed = ''   # seed string, when recorded
# base class to handle processing/traversion of XML nodes
class TreeProcess:
    """Dispatch DOM nodes to handle_<nodename> methods on subclasses.

    Nodes without a specific handler are recursed into; subclasses are
    expected to provide handle_text for '#text' nodes.
    """
    def __init__ (self):
        self.nest_level = 0
    def trampoline (self, node):
        """Dispatch `node` to handle_<sanitized-name> when defined,
        otherwise recurse into its children."""
        name = node.nodeName
        if name == '#text':
            self.handle_text (node)
        else:
            # Node names may contain characters illegal in method names.
            try:
                method = getattr (self, 'handle_' + re.sub ('[^a-zA-Z0-9]', '_', name))
            except AttributeError:
                method = None
            if method:
                return method (node)
            else:
                return self.process_recursive (name, node)
    def process_recursive (self, node_name, node):
        self.process_children (node)
    def process_children (self, node):
        self.nest_level += 1
        for child in node.childNodes:
            self.trampoline (child)
        # BUG FIX: the nesting level must be decremented on the way back
        # up; it was previously incremented a second time, so nest_level
        # only ever grew.
        self.nest_level -= 1
# test report reader, this class collects some statistics and merges duplicate test binary runs
class ReportReader (TreeProcess):
    """Collect TestBinary records and package info from a gtester log."""
    def __init__ (self):
        TreeProcess.__init__ (self)
        self.binary_names = []   # binary paths, in first-seen order
        self.binaries = {}       # path -> TestBinary (duplicate runs merged)
        self.last_binary = None  # TestBinary currently being filled in
        self.info = {}           # package / version / revision strings
    def binary_list (self):
        """All TestBinary objects, in the order first encountered."""
        return [self.binaries[name] for name in self.binary_names]
    def get_info (self):
        return self.info
    def handle_info (self, node):
        self.info['package'] = node_as_text (find_child (node, 'package'))
        self.info['version'] = node_as_text (find_child (node, 'version'))
        dn = find_child (node, 'revision')
        # BUG FIX: always provide the 'revision' key; report writers index
        # self.info['revision'] unconditionally, which raised KeyError for
        # logs written without a <revision> element.
        self.info['revision'] = node_as_text (dn) if dn is not None else ''
    def handle_testcase (self, node):
        self.last_binary.testcases += [ node ]
        if attribute_as_text (node, 'result', 'status') == 'success':
            self.last_binary.success_cases += 1
        # 'skipped' is absent, '0' or '1'; appending '0' tolerates absence.
        if bool (int (attribute_as_text (node, 'skipped') + '0')):
            self.last_binary.skipped_cases += 1
    def handle_text (self, node):
        pass
    def handle_testbinary (self, node):
        path = node.attributes.get ('path', None).value
        # Merge repeated runs of the same binary into one record.
        if self.binaries.get (path, -1) == -1:
            self.binaries[path] = TestBinary (path)
            self.binary_names += [ path ]
        self.last_binary = self.binaries[path]
        dur = node_as_text (find_child (node, 'duration'))
        try:
            dur = float (dur)
        except ValueError:
            dur = 0
        if dur:
            self.last_binary.duration += dur
        bin = find_child (node, 'binary')
        if bin:
            self.last_binary.file = attribute_as_text (bin, 'file')
        rseed = find_child (node, 'random-seed')
        if rseed:
            self.last_binary.random_seed = node_as_text (rseed)
        self.process_children (node)
class ReportWriter(object):
    """Base class for reporting."""
    def __init__(self, binary_list):
        self.binaries = binary_list
    def _error_text(self, node):
        """Get a string representing the error children of node."""
        txt = ''.join (node_as_text (enode)
                       for enode in list_children (node, 'error'))
        # Normalize to a trailing newline whenever there is any text.
        if txt and not txt.endswith ('\n'):
            txt += '\n'
        return txt
class HTMLReportWriter(ReportWriter):
    """Render the collected test results as a self-contained HTML page.

    Output goes to stdout via oprint(); per-testcase details are stored
    in a javascript array and shown on demand in a popup window.
    """
    # Javascript/CSS snippet to toggle element visibility
    cssjs = r'''
<style type="text/css" media="screen">
.VisibleSection { }
.HiddenSection { display: none; }
</style>
<script language="javascript" type="text/javascript"><!--
function toggle_display (parentid, tagtype, idmatch, keymatch) {
ptag = document.getElementById (parentid);
tags = ptag.getElementsByTagName (tagtype);
for (var i = 0; i < tags.length; i++) {
tag = tags[i];
var key = tag.getAttribute ("keywords");
if (tag.id.indexOf (idmatch) == 0 && key && key.match (keymatch)) {
if (tag.className.indexOf ("HiddenSection") >= 0)
tag.className = "VisibleSection";
else
tag.className = "HiddenSection";
}
}
}
message_array = Array();
function view_testlog (wname, file, random_seed, tcase, msgtitle, msgid) {
txt = message_array[msgid];
var w = window.open ("", // URI
wname,
"resizable,scrollbars,status,width=790,height=400");
var doc = w.document;
doc.write ("<h2>File: " + file + "</h2>\n");
doc.write ("<h3>Case: " + tcase + "</h3>\n");
doc.write ("<strong>Random Seed:</strong> <code>" + random_seed + "</code> <br /><br />\n");
doc.write ("<strong>" + msgtitle + "</strong><br />\n");
doc.write ("<pre>");
doc.write (txt);
doc.write ("</pre>\n");
doc.write ("<a href=\'javascript:window.close()\'>Close Window</a>\n");
doc.close();
}
--></script>
'''
    def __init__ (self, info, binary_list):
        ReportWriter.__init__(self, binary_list)
        self.info = info
        self.bcounter = 0        # binaries emitted so far
        self.tcounter = 0        # testcases within the current binary
        self.total_tcounter = 0
        self.total_fcounter = 0  # failed testcases overall
        self.total_duration = 0
        self.indent_depth = 0
        self.lastchar = ''
    def oprint (self, message):
        """Write `message` to stdout, remembering its last character."""
        sys.stdout.write (message)
        if message:
            self.lastchar = message[-1]
    def handle_info (self):
        """Emit the package/version header; the revision line is optional."""
        self.oprint ('<h3>Package: %(package)s, version: %(version)s</h3>\n' % self.info)
        # Robustness fix: logs without a <revision> element used to raise
        # KeyError here; treat a missing revision like an empty one.
        if self.info.get ('revision'):
            self.oprint ('<h5>Report generated from: %(revision)s</h5>\n' % self.info)
    def handle_text (self, node):
        self.oprint (node.nodeValue)
    def handle_testcase (self, node, binary):
        """Emit one (initially hidden) result row for a testcase node."""
        skipped = bool (int (attribute_as_text (node, 'skipped') + '0'))
        if skipped:
            return # skipped tests are uninteresting for HTML reports
        path = attribute_as_text (node, 'path')
        duration = node_as_text (node, 'duration')
        result = attribute_as_text (node, 'result', 'status')
        rcolor = {
            'success': 'bgcolor="lightgreen"',
            'failed': 'bgcolor="red"',
        }.get (result, '')
        if result != 'success':
            duration = '-' # ignore bogus durations
        self.oprint ('<tr id="b%u_t%u_" keywords="%s all" class="HiddenSection">\n' % (self.bcounter, self.tcounter, result))
        self.oprint ('<td>%s %s</td> <td align="right">%s</td> \n' % (html_indent_string (4), path, duration))
        perflist = list_children (node, 'performance')
        if result != 'success':
            # Escape the error text for embedding inside a javascript
            # string within HTML.  BUG FIX: the '&' and '<' substitutions
            # had degenerated into identity replacements when the entity
            # references were lost; the HTML escaping is restored here.
            txt = self._error_text(node)
            txt = re.sub (r'"', r'\\"', txt)
            txt = re.sub (r'\n', r'\\n', txt)
            txt = re.sub (r'&', r'&amp;', txt)
            txt = re.sub (r'<', r'&lt;', txt)
            self.oprint ('<script language="javascript" type="text/javascript">message_array["b%u_t%u_"] = "%s";</script>\n' % (self.bcounter, self.tcounter, txt))
            self.oprint ('<td align="center"><a href="javascript:view_testlog (\'%s\', \'%s\', \'%s\', \'%s\', \'Output:\', \'b%u_t%u_\')">Details</a></td>\n' %
                         ('TestResultWindow', binary.file, binary.random_seed, path, self.bcounter, self.tcounter))
        elif perflist:
            presults = []
            for perf in perflist:
                pmin = bool (int (attribute_as_text (perf, 'minimize')))
                pmax = bool (int (attribute_as_text (perf, 'maximize')))
                pval = float (attribute_as_text (perf, 'value'))
                txt = node_as_text (perf)
                # HTML-escape the raw performance text (restored, as above).
                txt = re.sub (r'&', r'&amp;', txt)
                txt = re.sub (r'<', r'&lt;', txt)
                txt = '<strong>Performance(' + (pmin and '<em>minimized</em>' or '<em>maximized</em>') + '):</strong> ' + txt.strip() + '<br />\n'
                txt = re.sub (r'"', r'\\"', txt)
                txt = re.sub (r'\n', r'\\n', txt)
                presults += [ (pval, txt) ]
            # Order attachments by performance value.
            presults.sort()
            ptxt = ''.join ([e[1] for e in presults])
            self.oprint ('<script language="javascript" type="text/javascript">message_array["b%u_t%u_"] = "%s";</script>\n' % (self.bcounter, self.tcounter, ptxt))
            self.oprint ('<td align="center"><a href="javascript:view_testlog (\'%s\', \'%s\', \'%s\', \'%s\', \'Test Results:\', \'b%u_t%u_\')">Details</a></td>\n' %
                         ('TestResultWindow', binary.file, binary.random_seed, path, self.bcounter, self.tcounter))
        else:
            self.oprint ('<td align="center">-</td>\n')
        self.oprint ('<td align="right" %s>%s</td>\n' % (rcolor, result))
        self.oprint ('</tr>\n')
        self.tcounter += 1
        self.total_tcounter += 1
        self.total_fcounter += result != 'success'
    def handle_binary (self, binary):
        """Emit the summary row for one binary, then its testcase rows."""
        self.tcounter = 1
        self.bcounter += 1
        self.total_duration += binary.duration
        self.oprint ('<tr><td><strong>%s</strong></td><td align="right">%f</td> <td align="center">\n' % (binary.name, binary.duration))
        erlink, oklink = ('', '')
        real_cases = len (binary.testcases) - binary.skipped_cases
        # The ER/OK links toggle visibility of failed/successful rows.
        if binary.success_cases < real_cases:
            erlink = 'href="javascript:toggle_display (\'ResultTable\', \'tr\', \'b%u_\', \'failed\')"' % self.bcounter
        if binary.success_cases:
            oklink = 'href="javascript:toggle_display (\'ResultTable\', \'tr\', \'b%u_\', \'success\')"' % self.bcounter
        if real_cases != 0:
            self.oprint ('<a %s>ER</a>\n' % erlink)
            self.oprint ('<a %s>OK</a>\n' % oklink)
            self.oprint ('</td>\n')
            perc = binary.success_cases * 100.0 / real_cases
            pcolor = {
                100 : 'bgcolor="lightgreen"',
                0 : 'bgcolor="red"',
            }.get (int (perc), 'bgcolor="yellow"')
            self.oprint ('<td align="right" %s>%.2f%%</td>\n' % (pcolor, perc))
            self.oprint ('</tr>\n')
        else:
            self.oprint ('Empty\n')
            self.oprint ('</td>\n')
            self.oprint ('</tr>\n')
        for tc in binary.testcases:
            self.handle_testcase (tc, binary)
    def handle_totals (self):
        """Emit the aggregate totals row."""
        self.oprint ('<tr>')
        self.oprint ('<td><strong>Totals:</strong> %u Binaries, %u Tests, %u Failed, %u Succeeded</td>' %
                     (self.bcounter, self.total_tcounter, self.total_fcounter, self.total_tcounter - self.total_fcounter))
        self.oprint ('<td align="right">%f</td>\n' % self.total_duration)
        self.oprint ('<td align="center">-</td>\n')
        if self.total_tcounter != 0:
            perc = (self.total_tcounter - self.total_fcounter) * 100.0 / self.total_tcounter
        else:
            perc = 0.0
        pcolor = {
            100 : 'bgcolor="lightgreen"',
            0 : 'bgcolor="red"',
        }.get (int (perc), 'bgcolor="yellow"')
        self.oprint ('<td align="right" %s>%.2f%%</td>\n' % (pcolor, perc))
        self.oprint ('</tr>\n')
    def printout (self):
        """Write the complete HTML document to stdout."""
        self.oprint ('<html><head>\n')
        self.oprint ('<title>GTester Unit Test Report</title>\n')
        self.oprint (self.cssjs)
        self.oprint ('</head>\n')
        self.oprint ('<body>\n')
        self.oprint ('<h2>GTester Unit Test Report</h2>\n')
        self.handle_info ()
        self.oprint ('<table id="ResultTable" width="100%" border="1">\n<tr>\n')
        self.oprint ('<th>Program / Testcase </th>\n')
        self.oprint ('<th style="width:8em">Duration (sec)</th>\n')
        self.oprint ('<th style="width:5em">View</th>\n')
        self.oprint ('<th style="width:5em">Result</th>\n')
        self.oprint ('</tr>\n')
        for tb in self.binaries:
            self.handle_binary (tb)
        self.handle_totals()
        self.oprint ('</table>\n')
        self.oprint ('</body>\n')
        self.oprint ('</html>\n')
class SubunitWriter(ReportWriter):
    """Reporter to output a subunit stream."""
    def printout(self):
        """Emit every recorded testcase to stdout as a subunit stream."""
        client = subunit.TestProtocolClient(sys.stdout)
        for binary in self.binaries:
            for case_node in binary.testcases:
                GTestCase(case_node, binary).run(client)
class GTestCase(object):
    """A representation of a gtester test result as a pyunit TestCase."""
    def __init__(self, case, binary):
        """Create a GTestCase for case `case` from binary program `binary`."""
        self._case = case
        self._binary = binary
        # the name of the case - e.g. /dbusmenu/glib/objects/menuitem/props_boolstr
        self._path = attribute_as_text(self._case, 'path')
    def id(self):
        """What test is this? Returns the gtester path for the testcase."""
        return self._path
    def _get_details(self):
        """Calculate a details dict for the test - attachments etc."""
        details = {}
        # NOTE(review): `result` is computed here but never used.
        result = attribute_as_text(self._case, 'result', 'status')
        details['filename'] = Content(mime_utf8, lambda:[self._binary.file])
        details['random_seed'] = Content(mime_utf8,
            lambda:[self._binary.random_seed])
        if self._get_outcome() == 'addFailure':
            # Extract the error details. Skips have no details because its not
            # skip like unittest does, instead the runner just bypasses N test.
            txt = self._error_text(self._case)
            details['error'] = Content(mime_utf8, lambda:[txt])
        if self._get_outcome() == 'addSuccess':
            # Sucessful tests may have performance metrics.
            perflist = list_children(self._case, 'performance')
            if perflist:
                presults = []
                for perf in perflist:
                    # minimize/maximize flags say which direction is "better".
                    pmin = bool (int (attribute_as_text (perf, 'minimize')))
                    pmax = bool (int (attribute_as_text (perf, 'maximize')))
                    pval = float (attribute_as_text (perf, 'value'))
                    txt = node_as_text (perf)
                    txt = 'Performance(' + (pmin and 'minimized' or 'maximized'
                        ) + '): ' + txt.strip() + '\n'
                    presults += [(pval, txt)]
                # Sort attachments by the performance value.
                presults.sort()
                perf_details = [e[1] for e in presults]
                details['performance'] = Content(mime_utf8, lambda:perf_details)
        return details
    def _get_outcome(self):
        # A skipped flag wins over the nominal result.
        if int(attribute_as_text(self._case, 'skipped') + '0'):
            return 'addSkip'
        outcome = attribute_as_text(self._case, 'result', 'status')
        if outcome == 'success':
            return 'addSuccess'
        else:
            return 'addFailure'
    def run(self, result):
        """Report this case's outcome to the pyunit-style `result` object."""
        time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
        result.time(time)
        result.startTest(self)
        try:
            outcome = self._get_outcome()
            details = self._get_details()
            # Only provide a duration IFF outcome == 'addSuccess' - the main
            # parser claims bogus results otherwise: in that case emit time as
            # zero perhaps.
            if outcome == 'addSuccess':
                duration = float(node_as_text(self._case, 'duration'))
                duration = duration * 1000000
                timedelta = datetime.timedelta(0, 0, duration)
                time = time + timedelta
                result.time(time)
            getattr(result, outcome)(self, details=details)
        finally:
            result.stopTest(self)
# main program handling
def parse_opts():
    """Parse program options.

    :return: (options, args); (None, None) when --version was handled.
    """
    parser = optparse.OptionParser()
    parser.version = pkginstall_configvars.get('glib-version', '0.0-uninstalled')
    parser.usage = "%prog [OPTIONS] <gtester-log.xml>"
    parser.description = "Generate HTML reports from the XML log files generated by gtester."
    parser.epilog = "gtester-report (GLib utils) version %s." % (parser.version,)
    parser.add_option("-v", "--version", action="store_true", dest="version", default=False,
                      help="Show program version.")
    parser.add_option("-s", "--subunit", action="store_true", dest="subunit", default=False,
                      help="Output subunit [See https://launchpad.net/subunit/"
                      " Needs python-subunit]")
    opts, args = parser.parse_args()
    if opts.version:
        print(parser.epilog)
        return None, None
    if len(args) != 1:
        parser.error("Must supply a log file to parse.")
    if opts.subunit and subunit is None:
        parser.error("python-subunit is not installed.")
    return opts, args
def main():
    """Parse a gtester XML log and emit an HTML or subunit report."""
    options, files = parse_opts()
    if options is None:
        return 0  # --version was already handled
    document = xml.dom.minidom.parse (files[0])
    reader = ReportReader()
    reader.trampoline (document)
    writer_class = SubunitWriter if options.subunit else HTMLReportWriter
    writer_class(reader.get_info(), reader.binary_list()).printout()

if __name__ == '__main__':
    main()
| Python |
# GDB auto-load hook: make the installed glib pretty-printer module
# importable, then register its printers for the objfile being loaded.
import sys
import gdb
# Update module path.
dir_ = '/usr/local/share/glib-2.0/gdb'
if not dir_ in sys.path:
    sys.path.insert(0, dir_)
from glib import register
# Attach the glib pretty-printers to the current object file.
register (gdb.current_objfile ())
| Python |
import gdb
# This is not quite right, as local vars may override symname
def read_global_var (symname):
    """Read `symname` via the currently selected stack frame."""
    frame = gdb.selected_frame ()
    return frame.read_var (symname)
def g_quark_to_string (quark):
    """Map a GQuark number to its interned string, or None."""
    if quark == None:
        return None
    quark = long(quark)
    if quark == 0:
        return None
    quarks = read_global_var ("g_quarks")
    max_quark = long(read_global_var ("g_quark_seq_id"))
    # Only quark values below the current sequence id are valid indices.
    if quark < max_quark:
        return quarks[quark].string()
    return None
# We override the node printers too, so that node->next is not expanded
class GListNodePrinter:
    "Prints a GList node"
    def __init__ (self, val):
        # val: gdb.Value of a single GList node
        self.val = val
    def to_string (self):
        # Show the data plus raw next/prev pointers without recursing.
        return "{data=%s, next=0x%x, prev=0x%x}" % (str(self.val["data"]), long(self.val["next"]), long(self.val["prev"]))
class GSListNodePrinter:
    "Prints a GSList node"
    def __init__ (self, val):
        # val: gdb.Value of a single GSList node
        self.val = val
    def to_string (self):
        # Show the data plus the raw next pointer without recursing.
        return "{data=%s, next=0x%x}" % (str(self.val["data"]), long(self.val["next"]))
class GListPrinter:
    "Prints a GList"
    class _iterator:
        # Walks the linked list, yielding ('[N]', data) pairs as gdb
        # expects for the "array" display hint.
        def __init__(self, head, listtype):
            # head: gdb.Value of the first node; listtype: "GList"/"GSList"
            self.link = head
            self.listtype = listtype
            self.count = 0
        def __iter__(self):
            return self
        def next(self):
            # Python 2 iterator protocol (gdb embeds python 2 here).
            if self.link == 0:
                raise StopIteration
            data = self.link['data']
            self.link = self.link['next']
            count = self.count
            self.count = self.count + 1
            return ('[%d]' % count, data)
    def __init__ (self, val, listtype):
        self.val = val
        self.listtype = listtype
    def children(self):
        return self._iterator(self.val, self.listtype)
    def to_string (self):
        # The scalar representation is just the head pointer value.
        return "0x%x" % (long(self.val))
    def display_hint (self):
        return "array"
class GHashPrinter:
    "Prints a GHashTable"
    class _iterator:
        # Yields alternating ('[Nk]', key) / ('[Nv]', value) pairs, the
        # layout gdb expects for the "map" display hint.
        def __init__(self, ht, keys_are_strings):
            self.ht = ht
            if ht != 0:
                self.array = ht["nodes"]
                self.size = ht["size"]
            self.pos = 0
            self.keys_are_strings = keys_are_strings
            # Value queued to be emitted on the call after its key.
            self.value = None
        def __iter__(self):
            return self
        def next(self):
            # Python 2 iterator protocol (gdb embeds python 2 here).
            if self.ht == 0:
                raise StopIteration
            if self.value != None:
                # A value was queued by the previous call: emit it now.
                v = self.value
                self.value = None
                return v
            while long(self.pos) < long(self.size):
                node = self.array[self.pos]
                self.pos = self.pos + 1
                # key_hash values 0/1 mark unused/tombstone slots; >= 2
                # indicates a live entry.
                if long (node["key_hash"]) >= 2:
                    key = node["key"]
                    val = node["value"]
                    if self.keys_are_strings:
                        key = key.cast (gdb.lookup_type("char").pointer())
                    # Queue value for next result
                    self.value = ('[%dv]'% (self.pos), val)
                    # Return key
                    return ('[%dk]'% (self.pos), key)
            raise StopIteration
    def __init__ (self, val):
        self.val = val
        self.keys_are_strings = False
        # If the table hashes with g_str_hash, show keys as C strings.
        # (Bare except: the symbol may simply not exist in this binary.)
        try:
            string_hash = read_global_var ("g_str_hash")
        except:
            string_hash = None
        if self.val != 0 and string_hash != None and self.val["hash_func"] == string_hash:
            self.keys_are_strings = True
    def children(self):
        return self._iterator(self.val, self.keys_are_strings)
    def to_string (self):
        # The scalar representation is just the table pointer value.
        return "0x%x" % (long(self.val))
    def display_hint (self):
        return "map"
def pretty_printer_lookup (val):
    # NOTE(review): dead code — this definition is immediately shadowed by
    # the pretty_printer_lookup defined right below, and it references
    # is_g_type_instance / GTypePrettyPrinter, which are not defined in
    # this module (they appear to belong to the gobject printer module).
    # Confirm and remove.
    if is_g_type_instance (val):
        return GTypePrettyPrinter (val)
def pretty_printer_lookup (val):
    """Return a pretty-printer for GLib container values, or None.

    Pointer (or reference-to-pointer) values get the whole-container
    printers; plain struct values get the single-node printers.
    """
    # None yet, want things like hash table and list
    type = val.type.unqualified()
    # If it points to a reference, get the reference.
    if type.code == gdb.TYPE_CODE_REF:
        type = type.target ()
    if type.code == gdb.TYPE_CODE_PTR:
        type = type.target().unqualified()
        t = str(type)
        if t == "GList":
            return GListPrinter(val, "GList")
        if t == "GSList":
            return GListPrinter(val, "GSList")
        if t == "GHashTable":
            return GHashPrinter(val)
    else:
        t = str(type)
        if t == "GList":
            return GListNodePrinter(val)
        # BUG FIX: in this branch `type` is not a pointer, so its name can
        # only be "GSList" (never "GSList *"), and a bare node must be
        # shown with the node printer rather than the whole-list printer.
        if t == "GSList":
            return GSListNodePrinter(val)
    return None
def register (obj):
    """Install pretty_printer_lookup on `obj` (default: gdb itself)."""
    target = gdb if obj == None else obj
    target.pretty_printers.append (pretty_printer_lookup)
class ForeachCommand (gdb.Command):
    """Foreach on list"""
    # Implements the "gforeach VAR in LIST-EXPR: COMMAND" gdb command:
    # runs COMMAND once per list node with $VAR bound to the node's data.
    def __init__ (self):
        super (ForeachCommand, self).__init__ ("gforeach",
                                               gdb.COMMAND_DATA,
                                               gdb.COMPLETE_SYMBOL)
    def valid_name (self, name):
        # A variable name must begin with a letter.
        if not name[0].isalpha():
            return False
        return True
    def parse_args (self, arg):
        # Split "VAR in LIST-EXPR: COMMAND" into its three parts.
        i = arg.find(" ")
        if i <= 0:
            raise Exception ("No var specified")
        var = arg[:i]
        if not self.valid_name(var):
            raise Exception ("Invalid variable name")
        # Skip whitespace and require the literal "in" keyword.
        while i < len (arg) and arg[i].isspace():
            i = i + 1
        if arg[i:i+2] != "in":
            raise Exception ("Invalid syntax, missing in")
        i = i + 2
        while i < len (arg) and arg[i].isspace():
            i = i + 1
        # The list expression runs up to the colon; the command follows.
        colon = arg.find (":", i)
        if colon == -1:
            raise Exception ("Invalid syntax, missing colon")
        val = arg[i:colon]
        colon = colon + 1
        while colon < len (arg) and arg[colon].isspace():
            colon = colon + 1
        command = arg[colon:]
        return (var, val, command)
    def do_iter(self, arg, item, command):
        # Bind convenience variable $<arg> to `item`, then run `command`.
        item = item.cast (gdb.lookup_type("void").pointer())
        item = long(item)
        to_eval = "set $%s = (void *)0x%x\n"%(arg, item)
        gdb.execute(to_eval)
        gdb.execute(command)
    def slist_iterator (self, arg, container, command):
        # Walk a GSList, invoking `command` once per node's data pointer.
        l = container.cast (gdb.lookup_type("GSList").pointer())
        while long(l) != 0:
            self.do_iter (arg, l["data"], command)
            l = l["next"]
    def list_iterator (self, arg, container, command):
        # Walk a GList, invoking `command` once per node's data pointer.
        l = container.cast (gdb.lookup_type("GList").pointer())
        while long(l) != 0:
            self.do_iter (arg, l["data"], command)
            l = l["next"]
    def pick_iterator (self, container):
        # Choose an iterator based on the container's (pointed-to) type.
        t = container.type.unqualified()
        if t.code == gdb.TYPE_CODE_PTR:
            t = t.target().unqualified()
        t = str(t)
        if t == "GSList":
            return self.slist_iterator
        if t == "GList":
            return self.list_iterator
        raise Exception("Invalid container type %s"%(str(container.type)))
    def invoke (self, arg, from_tty):
        (var, container, command) = self.parse_args(arg)
        container = gdb.parse_and_eval (container)
        func = self.pick_iterator(container)
        func(var, container, command)
# Instantiating the command registers it with gdb.
ForeachCommand ()
| Python |
import sys
import gdb

# Auto-load stub: make the installed glib gdb helpers importable, then
# register the pretty-printers for the objfile being loaded.
# Update module path.
dir_ = '/usr/local/share/glib-2.0/gdb'
# Fixed: idiomatic membership test (`x not in y` rather than `not x in y`).
if dir_ not in sys.path:
    sys.path.insert(0, dir_)

from gobject import register
register (gdb.current_objfile ())
| Python |
import gdb
import glib
import gdb.backtrace
import gdb.command.backtrace
# This is not quite right, as local vars may override symname
def read_global_var (symname):
    """Read global variable `symname` via the selected frame.

    See the note above: a local of the same name would shadow the global.
    """
    return gdb.selected_frame().read_var(symname)
def g_type_to_name (gtype):
    """Resolve a numeric GType id to its registered name, or None."""
    def lookup_fundamental_type (typenode):
        # Fundamental types are kept in a static array indexed by id >> 2.
        if typenode == 0:
            return None
        val = read_global_var ("static_fundamental_type_nodes")
        if val == None:
            return None
        return val[typenode >> 2].address()
    gtype = long(gtype)
    # Round down to a multiple of 4 (the low bits presumably carry flag
    # information — TODO confirm against gtype.c).
    typenode = gtype - gtype % 4
    if typenode > (255 << 2):
        # Beyond the fundamental range the id is itself a TypeNode pointer.
        typenode = gdb.Value(typenode).cast (gdb.lookup_type("TypeNode").pointer())
    else:
        typenode = lookup_fundamental_type (typenode)
    if typenode != None:
        return glib.g_quark_to_string (typenode["qname"])
    return None
def is_g_type_instance (val):
    """True if val is a pointer to a struct whose chain of first members
    ultimately reaches GTypeInstance (i.e. a GObject-style instance)."""
    def is_g_type_instance_helper (type):
        # Recurse down the first field: GObject-style structs embed their
        # parent struct (ultimately GTypeInstance) as the first member.
        if str(type) == "GTypeInstance":
            return True
        while type.code == gdb.TYPE_CODE_TYPEDEF:
            type = type.target()
        if type.code != gdb.TYPE_CODE_STRUCT:
            return False
        fields = type.fields()
        if len (fields) < 1:
            return False
        first_field = fields[0]
        return is_g_type_instance_helper(first_field.type)
    type = val.type
    # Only pointers can be instances.
    if type.code != gdb.TYPE_CODE_PTR:
        return False
    type = type.target()
    return is_g_type_instance_helper (type)
def g_type_name_from_instance (instance):
    """Return the GType name for a GTypeInstance pointer, or None when the
    pointer is NULL or its memory cannot be read."""
    if long(instance) != 0:
        try:
            inst = instance.cast (gdb.lookup_type("GTypeInstance").pointer())
            klass = inst["g_class"]
            gtype = klass["g_type"]
            name = g_type_to_name (gtype)
            return name
        except RuntimeError:
            # Unreadable/garbage memory: treat as "not a GObject".
            pass
    return None
class GTypePrettyPrinter:
    "Prints a GType instance pointer"

    def __init__ (self, val):
        # val: a gdb.Value holding a pointer to a GTypeInstance-compatible
        # struct (checked by the caller via is_g_type_instance).
        self.val = val

    def to_string (self):
        # Pointer value plus "[TypeName]" when the type can be resolved.
        name = g_type_name_from_instance (self.val)
        if name:
            return ("0x%x [%s]")% (long(self.val), name)
        return ("0x%x") % (long(self.val))
def pretty_printer_lookup (val):
    """Return a GTypePrettyPrinter for GTypeInstance pointers, else None."""
    if not is_g_type_instance (val):
        return None
    return GTypePrettyPrinter (val)
def get_signal_name (id):
    """Map a numeric GSignal id to its registered name, or None.

    Fixed: identity comparison with None; folded the redundant two-step
    conversion of g_n_signal_nodes into one expression.
    """
    if id is None:
        return None
    id = long(id)
    if id == 0:
        return None
    # gsignal.c's global table of SignalNode*, indexed by signal id.
    val = read_global_var ("g_signal_nodes")
    max_s = long(read_global_var ("g_n_signal_nodes"))
    if id < max_s:
        return val[id]["name"].string()
    return None
class GFrameWrapper:
    """Wraps a gdb frame, hiding the internal "IA__" alias prefix that
    glib uses for exported symbols; everything else is delegated."""

    def __init__ (self, frame):
        self.frame = frame

    def name (self):
        # Strip the alias prefix so backtraces show the public name.
        real = self.frame.name()
        if real is None or not real.startswith("IA__"):
            return real
        return real[4:]

    def __getattr__ (self, name):
        # Transparent delegation for every other attribute/method.
        return getattr (self.frame, name)
# Monkey patch FrameWrapper to avoid IA__ in symbol names
# Keep a reference to the original __init__ so the patched version can
# chain to it after possibly substituting a GFrameWrapper.
old__init__ = gdb.command.backtrace.FrameWrapper.__init__
def monkey_patched_init(self, frame):
    # Wrap frames whose symbol carries the internal "IA__" alias prefix.
    name = frame.name()
    if name and name.startswith("IA__"):
        frame = GFrameWrapper(frame)
    old__init__(self,frame)
gdb.command.backtrace.FrameWrapper.__init__ = monkey_patched_init
class DummyFrame:
    """Placeholder frame shown for the machinery frames that were folded
    into a SignalFrame; prints only an elision marker."""

    def __init__ (self, frame):
        self.frame = frame

    def name (self):
        return "signal-emission-dummy"

    def describe (self, stream, full):
        # The folded frame is intentionally not described in detail.
        stream.write (" <...>\n")

    def __getattr__ (self, name):
        # Delegate everything else to the real frame being hidden.
        return getattr (self.frame, name)
class SignalFrame:
    """Synthetic frame summarizing a GObject signal emission.

    Collapses a run of g_signal_emit* / signal_emit_unlocked_R frames into
    a single " <emit signal S on instance I>" backtrace line.

    Fixes vs. original:
      * gdb.Type("GObject") is not a valid constructor — gdb types must be
        obtained with gdb.lookup_type(), as done everywhere else here.
      * or_join_array indexed dict.keys(), which fails on Python 3 views;
        the keys are now materialized with list().
      * the repeated ":detail" suffix logic is factored into _with_detail.
    """

    def __init__ (self, frames):
        # frames: the real frames being collapsed; the last one stands in
        # for this synthetic frame where gdb needs a concrete frame.
        self.frame = frames[-1]
        self.frames = frames

    def name (self):
        return "signal-emission"

    def read_var (self, frame, name, array = None):
        """Read local `name` from frame; append to `array` when given.

        Returns None when the variable is absent or optimized out.
        """
        try:
            v = frame.read_var (name)
            if v is None or v.is_optimized_out:
                return None
            if array is not None:
                array.append (v)
            return v
        except ValueError:
            return None

    def read_object (self, frame, name, array = None):
        """Like read_var, but casts to GObject* and sanity-checks that the
        pointer really looks like a GTypeInstance."""
        try:
            v = frame.read_var (name)
            if v is None or v.is_optimized_out:
                return None
            v = v.cast (gdb.lookup_type("GObject").pointer())
            # Ensure this is a somewhat correct object pointer
            if v is not None and g_type_name_from_instance (v):
                if array is not None:
                    array.append (v)
                return v
            return None
        except ValueError:
            return None

    def append (self, array, obj):
        # Append obj unless it is None.
        if obj is not None:
            array.append (obj)

    def or_join_array (self, array):
        """Join the unique string forms of array's items with " or ";
        return "???" for an empty array."""
        if not array:
            return "???"
        uniq = {}
        for item in array:
            uniq[str(item)] = 1
        # list() so indexing also works on Python 3 dict views.
        names = list(uniq.keys())
        s = names[0]
        for i in range(1, len(names)):
            s = s + " or %s" % names[i]
        return s

    def _with_detail (self, frame, signal):
        """Append ":detail" to the signal name when the frame carries a
        non-NULL detail quark."""
        detail = self.read_var (frame, "detail")
        detail = glib.g_quark_to_string (detail)
        if detail is not None:
            signal = signal + ":" + detail
        return signal

    def describe (self, stream, full):
        """Write the one-line emission summary, gathering candidate
        instances and signal names from every collapsed frame."""
        instances = []
        signals = []
        for frame in self.frames:
            name = frame.name()
            if name == "signal_emit_unlocked_R":
                self.read_object (frame, "instance", instances)
                node = self.read_var (frame, "node")
                if node:
                    self.append (signals,
                                 self._with_detail (frame, node["name"].string()))
            if name == "g_signal_emitv":
                instance_and_params = self.read_var (frame, "instance_and_params")
                if instance_and_params:
                    # Fixed: look the type up instead of gdb.Type(...).
                    instance = instance_and_params[0]["v_pointer"].cast (gdb.lookup_type("GObject").pointer())
                    self.append (instances, instance)
                signal = get_signal_name (self.read_var (frame, "signal_id"))
                if signal:
                    self.append (signals, self._with_detail (frame, signal))
            if name == "g_signal_emit_valist" or name == "g_signal_emit":
                self.read_object (frame, "instance", instances)
                signal = get_signal_name (self.read_var (frame, "signal_id"))
                if signal:
                    self.append (signals, self._with_detail (frame, signal))
            if name == "g_signal_emit_by_name":
                self.read_object (frame, "instance", instances)
                self.read_var (frame, "detailed_signal", signals)
                break
        instance = self.or_join_array (instances)
        signal = self.or_join_array (signals)
        stream.write (" <emit signal %s on instance %s>\n" % (signal, instance))

    def __getattr__ (self, name):
        # Delegate everything else to the representative real frame.
        return getattr (self.frame, name)
class GFrameFilter:
    """Backtrace frame filter that rewrites GObject signal-emission
    machinery into one SignalFrame preceded by DummyFrame placeholders.

    Fixes vs. original:
      * `prev_name.find("_marshal_")` was used directly as a boolean;
        str.find returns -1 (truthy!) when the substring is absent, so the
        test was wrong for nearly every frame. Now compared with != -1.
      * frames are pulled with the builtin next(), and __next__ is aliased
        so the filter also satisfies the Python 3 iterator protocol.
    """

    def __init__ (self, iter):
        self.queue = []
        self.iter = iter

    def __iter__ (self):
        return self

    def fill (self):
        """Buffer up to 7 frames so a whole emission run is visible."""
        while len(self.queue) <= 6:
            try:
                self.queue.append (next (self.iter))
            except StopIteration:
                return

    def find_signal_emission (self):
        """Index of signal_emit_unlocked_R among the first few buffered
        frames, or -1 when none is near the front."""
        for i in range (min (len(self.queue), 3)):
            if self.queue[i].name() == "signal_emit_unlocked_R":
                return i
        return -1

    def next (self):
        # Ensure we have enough frames for a full signal emission
        self.fill()
        if len(self.queue) == 0:
            raise StopIteration
        emission = self.find_signal_emission ()
        if emission > 0:
            # Extend backwards over marshaller / closure-invoke frames.
            start = emission
            while start > 0:
                prev_name = self.queue[start-1].name()
                if prev_name.find("_marshal_") != -1 or prev_name == "g_closure_invoke":
                    start = start - 1
                else:
                    break
            # Extend forwards over the public g_signal_emit* entry points.
            end = emission + 1
            while end < len(self.queue):
                if self.queue[end].name() in ["g_signal_emitv",
                                              "g_signal_emit_valist",
                                              "g_signal_emit",
                                              "g_signal_emit_by_name"]:
                    end = end + 1
                else:
                    break
            # Replace the run with dummies plus one summarizing frame.
            signal_frames = self.queue[start:end]
            new_frames = []
            for i in range(len(signal_frames)-1):
                new_frames.append(DummyFrame(signal_frames[i]))
            new_frames.append(SignalFrame(signal_frames))
            self.queue[start:end] = new_frames
        return self.queue.pop(0)

    # Python 3 iterator protocol.
    __next__ = next
def register (obj):
    """Install the GObject helpers: the signal-emission frame filter
    (global) and the pretty-printer lookup on `obj`, or on gdb itself
    when obj is None.

    Fixed: identity comparison with None (`is None`) instead of `== None`.
    """
    if obj is None:
        obj = gdb
    gdb.backtrace.push_frame_filter (GFrameFilter)
    obj.pretty_printers.append(pretty_printer_lookup)
| Python |
#!/usr/bin/env python
import gobject
import time
import dbus
import dbus.service
import dbus.mainloop.glib
class TestException(dbus.DBusException):
    """Error deliberately raised by the test service's error paths."""
    # The D-Bus error name under which this exception reaches callers.
    _dbus_error_name = 'com.example.TestException'
class TestService(dbus.service.Object):
    """Test server for the GDBus client test-suite.

    Exposes the com.example.Frob interface, echoing back (usually slightly
    transformed) every major D-Bus type, plus a hand-rolled implementation
    of org.freedesktop.DBus.Properties backed by the `frob_props` dict that
    __main__ attaches to the instance.
    """
    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='s', out_signature='s')
    def HelloWorld(self, hello_message):
        # 'Yo' deliberately triggers a D-Bus error reply for error-path tests.
        if str(hello_message) == 'Yo':
            raise TestException('Yo is not a proper greeting')
        else:
            return "You greeted me with '%s'. Thanks!"%(str(hello_message))

    @dbus.service.method("com.example.Frob",
                         in_signature='ss', out_signature='ss')
    def DoubleHelloWorld(self, hello1, hello2):
        return ("You greeted me with '%s'. Thanks!"%(str(hello1)), "Yo dawg, you uttered '%s'. Thanks!"%(str(hello2)))

    @dbus.service.method("com.example.Frob",
                         in_signature='', out_signature='su')
    def PairReturn(self):
        return ("foo", 42)

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='ybnqiuxtdsog', out_signature='ybnqiuxtdsog')
    def TestPrimitiveTypes(self, val_byte, val_boolean, val_int16, val_uint16, val_int32, val_uint32, val_int64, val_uint64, val_double, val_string, val_objpath, val_signature):
        # Each value comes back recognizably transformed so the client can
        # verify round-tripping of every primitive type.
        return val_byte + 1, not val_boolean, val_int16 + 1, val_uint16 + 1, val_int32 + 1, val_uint32 + 1, val_int64 + 1, val_uint64 + 1, -val_double + 0.123, val_string * 2, val_objpath + "/modified", val_signature * 2

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='ayabanaqaiauaxatad', out_signature='ayabanaqaiauaxatad')
    def TestArrayOfPrimitiveTypes(self, val_byte, val_boolean, val_int16, val_uint16, val_int32, val_uint32, val_int64, val_uint64, val_double):
        # Arrays are echoed back duplicated (list * 2 == concatenation).
        return val_byte*2, val_boolean*2, val_int16*2, val_uint16*2, val_int32*2, val_uint32*2, val_int64*2, val_uint64*2, val_double*2

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='asaoag', out_signature='asaoag')
    def TestArrayOfStringTypes(self, val_string, val_objpath, val_signature):
        return val_string * 2, val_objpath * 2, val_signature * 2

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature = 'a{yy}a{bb}a{nn}a{qq}a{ii}a{uu}a{xx}a{tt}a{dd}a{ss}a{oo}a{gg}',
                         out_signature = 'a{yy}a{bb}a{nn}a{qq}a{ii}a{uu}a{xx}a{tt}a{dd}a{ss}a{oo}a{gg}')
    def TestHashTables(self, hyy, hbb, hnn, hqq, hii, huu, hxx, htt, hdd, hss, hoo, hgg):
        # Returns a new dict per entry type with both keys and values
        # transformed in a type-appropriate, predictable way.
        ret_hyy = {}
        for i in hyy:
            ret_hyy[i*2] = (hyy[i]*3) & 255
        ret_hbb = {}
        for i in hbb:
            ret_hbb[i] = True
        ret_hnn = {}
        for i in hnn:
            ret_hnn[i*2] = hnn[i]*3
        ret_hqq = {}
        for i in hqq:
            ret_hqq[i*2] = hqq[i]*3
        ret_hii = {}
        for i in hii:
            ret_hii[i*2] = hii[i]*3
        ret_huu = {}
        for i in huu:
            ret_huu[i*2] = huu[i]*3
        ret_hxx = {}
        for i in hxx:
            ret_hxx[i + 2] = hxx[i] + 1
        ret_htt = {}
        for i in htt:
            ret_htt[i + 2] = htt[i] + 1
        ret_hdd = {}
        for i in hdd:
            ret_hdd[i + 2.5] = hdd[i] + 5.0
        ret_hss = {}
        for i in hss:
            ret_hss[i + "mod"] = hss[i]*2
        ret_hoo = {}
        for i in hoo:
            ret_hoo[i + "/mod"] = hoo[i] + "/mod2"
        ret_hgg = {}
        for i in hgg:
            ret_hgg[i + "assgit"] = hgg[i]*2
        return ret_hyy, ret_hbb, ret_hnn, ret_hqq, ret_hii, ret_huu, ret_hxx, ret_htt, ret_hdd, ret_hss, ret_hoo, ret_hgg

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='(ii)(s(ii)aya{ss})', out_signature='(ii)(s(ii)aya{ss})')
    def TestStructureTypes(self, s1, s2):
        (x, y) = s1;
        (desc, (x1, y1), ay, hss) = s2;
        ret_hss = {}
        for i in hss:
            ret_hss[i] = hss[i] + " ... in bed!"
        return (x + 1, y + 1), (desc + " ... in bed!", (x1 + 2, y1 + 2), ay * 2, ret_hss)

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='vb', out_signature='v')
    def TestVariant(self, v, modify):
        # When `modify` is set, transform the payload per its dbus type;
        # the result is re-wrapped in the incoming value's type.
        if modify:
            if type(v)==dbus.Boolean:
                ret = False
            elif type(v)==dbus.Dictionary:
                ret = {}
                for i in v:
                    ret[i] = v[i] * 2
            elif type(v)==dbus.Struct:
                ret = ["other struct", dbus.Int16(100)]
            else:
                ret = v * 2
        else:
            ret = v
        return (type(v))(ret)

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='a(ii)aa(ii)aasaa{ss}aayavaav', out_signature='a(ii)aa(ii)aasaa{ss}aayavaav')
    def TestComplexArrays(self, aii, aaii, aas, ahashes, aay, av, aav):
        return aii * 2, aaii * 2, aas * 2, ahashes * 2, aay * 2, av *2, aav * 2

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='a{s(ii)}a{sv}a{sav}a{saav}a{sa(ii)}a{sa{ss}}',
                         out_signature='a{s(ii)}a{sv}a{sav}a{saav}a{sa(ii)}a{sa{ss}}')
    def TestComplexHashTables(self, h_str_to_pair, h_str_to_variant, h_str_to_av, h_str_to_aav,
                              h_str_to_array_of_pairs, hash_of_hashes):
        # Only the first two tables get their keys suffixed; the rest pass
        # straight through.
        ret_h_str_to_pair = {}
        for i in h_str_to_pair:
            ret_h_str_to_pair[i + "_baz"] = h_str_to_pair[i]
        ret_h_str_to_variant = {}
        for i in h_str_to_variant:
            ret_h_str_to_variant[i + "_baz"] = h_str_to_variant[i]
        return ret_h_str_to_pair, ret_h_str_to_variant, h_str_to_av, h_str_to_aav, h_str_to_array_of_pairs, hash_of_hashes

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='', out_signature='')
    def Quit(self):
        # Stops the GLib main loop created in __main__.
        mainloop.quit()

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='sv', out_signature='')
    def FrobSetProperty(self, prop_name, prop_value):
        # Store the value and hand-emit PropertiesChanged ourselves, since
        # frob_props is a plain dict rather than dbus-python properties.
        self.frob_props[prop_name] = prop_value
        message = dbus.lowlevel.SignalMessage("/com/example/TestObject",
                                              "org.freedesktop.DBus.Properties",
                                              "PropertiesChanged")
        message.append("com.example.Frob")
        message.append({prop_name : prop_value})
        message.append([], signature="as")
        session_bus.send_message(message)

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob",
                         in_signature='', out_signature='')
    def FrobInvalidateProperty(self):
        # Emit PropertiesChanged with the property listed only in the
        # invalidated array (empty changed dict).
        self.frob_props["PropertyThatWillBeInvalidated"] = "OMGInvalidated"
        message = dbus.lowlevel.SignalMessage("/com/example/TestObject",
                                              "org.freedesktop.DBus.Properties",
                                              "PropertiesChanged")
        message.append("com.example.Frob")
        message.append({}, signature="a{sv}")
        message.append(["PropertyThatWillBeInvalidated"])
        session_bus.send_message(message)

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.signal("com.example.Frob",
                         signature="sov")
    def TestSignal(self, str1, objpath1, variant1):
        # Body intentionally empty: dbus-python emits the signal itself.
        pass

    @dbus.service.method("com.example.Frob",
                         in_signature='so', out_signature='')
    def EmitSignal(self, str1, objpath1):
        self.TestSignal (str1 + " .. in bed!", objpath1 + "/in/bed", "a variant")

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("com.example.Frob", in_signature='i', out_signature='',
                         async_callbacks=('return_cb', 'raise_cb'))
    def Sleep(self, msec, return_cb, raise_cb):
        # Asynchronous method: the reply is sent from a GLib timeout after
        # `msec` milliseconds; returning False removes the timeout source.
        def return_from_async_wait():
            return_cb()
            return False
        gobject.timeout_add(msec, return_from_async_wait)

    # ----------------------------------------------------------------------------------------------------
    @dbus.service.method("org.freedesktop.DBus.Properties",
                         in_signature = 'ss',
                         out_signature = 'v')
    def Get(self, interface_name, property_name):
        if interface_name == "com.example.Frob":
            return self.frob_props[property_name]
        else:
            raise TestException("No such interface " + interface_name)

    @dbus.service.method("org.freedesktop.DBus.Properties",
                         in_signature = 's',
                         out_signature = 'a{sv}')
    def GetAll(self, interface_name):
        if interface_name == "com.example.Frob":
            return self.frob_props
        else:
            raise TestException("No such interface " + interface_name)
if __name__ == '__main__':
    # Wire dbus-python to the GLib main loop, claim the well-known name and
    # export the test object.
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    session_bus = dbus.SessionBus()
    name = dbus.service.BusName("com.example.TestService", session_bus)
    obj = TestService(session_bus, '/com/example/TestObject')
    #print "Our unique name is %s"%(session_bus.get_unique_name())
    # Seed the ad-hoc property store (served via Get/GetAll above) with one
    # typed value per primitive, one array per primitive, and two strings.
    obj.frob_props = {}
    obj.frob_props["y"] = dbus.Byte(1)
    obj.frob_props["b"] = dbus.Boolean(True)
    obj.frob_props["n"] = dbus.Int16(2)
    obj.frob_props["q"] = dbus.UInt16(3)
    obj.frob_props["i"] = dbus.Int32(4)
    obj.frob_props["u"] = dbus.UInt32(5)
    obj.frob_props["x"] = dbus.Int64(6)
    obj.frob_props["t"] = dbus.UInt64(7)
    obj.frob_props["d"] = dbus.Double(7.5)
    obj.frob_props["s"] = dbus.String("a string")
    obj.frob_props["o"] = dbus.ObjectPath("/some/path")
    obj.frob_props["ay"] = [dbus.Byte(1), dbus.Byte(11)]
    obj.frob_props["ab"] = [dbus.Boolean(True), dbus.Boolean(False)]
    obj.frob_props["an"] = [dbus.Int16(2), dbus.Int16(12)]
    obj.frob_props["aq"] = [dbus.UInt16(3), dbus.UInt16(13)]
    obj.frob_props["ai"] = [dbus.Int32(4), dbus.Int32(14)]
    obj.frob_props["au"] = [dbus.UInt32(5), dbus.UInt32(15)]
    obj.frob_props["ax"] = [dbus.Int64(6), dbus.Int64(16)]
    obj.frob_props["at"] = [dbus.UInt64(7), dbus.UInt64(17)]
    obj.frob_props["ad"] = [dbus.Double(7.5), dbus.Double(17.5)]
    obj.frob_props["as"] = [dbus.String("a string"), dbus.String("another string")]
    obj.frob_props["ao"] = [dbus.ObjectPath("/some/path"), dbus.ObjectPath("/another/path")]
    obj.frob_props["foo"] = "a frobbed string"
    obj.frob_props["PropertyThatWillBeInvalidated"] = "InitialValue"
    # Runs until TestService.Quit() is invoked over the bus.
    mainloop = gobject.MainLoop()
    mainloop.run()
| Python |
#!/usr/bin/env python
# GDBus - GLib D-Bus Library
#
# Copyright (C) 2008-2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
#
# Author: David Zeuthen <davidz@redhat.com>
import os
import sys

# Locate the directory holding the bundled `codegen` package: an
# uninstalled source tree wins, then a path relative to this script on
# Windows (keeps the tool relocatable), then the configured libdir.
srcdir = os.environ.get('UNINSTALLED_GLIB_SRCDIR')
if srcdir is not None:
    path = os.path.join(srcdir, 'gio', 'gdbus-2.0')
elif os.name == 'nt':
    path = os.path.join(os.path.dirname(__file__), '..', 'lib', 'gdbus-2.0')
else:
    path = os.path.join('@libdir@', 'gdbus-2.0')
sys.path.insert(0, os.path.abspath(path))

from codegen import codegen_main

sys.exit(codegen_main.codegen_main())
| Python |
# -*- Mode: Python -*-
# GDBus - GLib D-Bus Library
#
# Copyright (C) 2008-2011 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307, USA.
#
# Author: David Zeuthen <davidz@redhat.com>
import sys
import re
from . import config
from . import utils
from . import dbustypes
from . import parser
# ----------------------------------------------------------------------------------------------------
class DocbookCodeGenerator:
    def __init__(self, ifaces, docbook):
        # ifaces: parsed D-Bus interface descriptions (see dbustypes).
        # docbook: basename prefix for the generated <docbook>-<iface>.xml
        # output files.
        self.ifaces = ifaces
        self.docbook = docbook
        # Pre-compute the cross-reference substitution tables for expand().
        self.generate_expand_dicts()
    def print_method_prototype(self, i, m, in_synopsis):
        """Write the C-style prototype of method m (interface i) to self.out.

        In synopsis mode the column widths are computed across ALL methods
        of the interface so the prototypes line up, and the method name is
        emitted as a link.
        """
        max_method_len = 0
        if in_synopsis:
            for _m in i.methods:
                max_method_len = max(len(_m.name), max_method_len)
        else:
            max_method_len = max(len(m.name), max_method_len)
        max_signature_len = 0
        if in_synopsis:
            for _m in i.methods:
                for a in _m.in_args:
                    max_signature_len = max(len(a.signature), max_signature_len)
                for a in _m.out_args:
                    max_signature_len = max(len(a.signature), max_signature_len)
        else:
            for a in m.in_args:
                max_signature_len = max(len(a.signature), max_signature_len)
            for a in m.out_args:
                max_signature_len = max(len(a.signature), max_signature_len)
        # %*s with a computed width pads each name/signature to the column.
        if in_synopsis:
            self.out.write('<link linkend="gdbus-method-%s.%s">%s</link>%*s ('
                           %(utils.dots_to_hyphens(i.name), m.name, m.name, max_method_len - len(m.name), ''))
        else:
            self.out.write('%s%*s ('
                           %(m.name, max_method_len - len(m.name), ''))
        count = 0
        for a in m.in_args:
            if (count > 0):
                self.out.write(',\n%*s'%(max_method_len + 2, ''))
            self.out.write('IN %s%*s %s'%(a.signature, max_signature_len - len(a.signature), '', a.name))
            count = count + 1
        for a in m.out_args:
            if (count > 0):
                self.out.write(',\n%*s'%(max_method_len + 2, ''))
            self.out.write('OUT %s%*s %s'%(a.signature, max_signature_len - len(a.signature), '', a.name))
            count = count + 1
        self.out.write(');\n')
    def print_signal_prototype(self, i, s, in_synopsis):
        """Write the prototype of signal s (interface i) to self.out.

        Synopsis mode aligns columns across all signals and links the name.
        """
        max_signal_len = 0
        if in_synopsis:
            for _s in i.signals:
                max_signal_len = max(len(_s.name), max_signal_len)
        else:
            max_signal_len = max(len(s.name), max_signal_len)
        max_signature_len = 0
        if in_synopsis:
            for _s in i.signals:
                for a in _s.args:
                    max_signature_len = max(len(a.signature), max_signature_len)
        else:
            for a in s.args:
                max_signature_len = max(len(a.signature), max_signature_len)
        if in_synopsis:
            self.out.write('<link linkend="gdbus-signal-%s.%s">%s</link>%*s ('
                           %(utils.dots_to_hyphens(i.name), s.name, s.name, max_signal_len - len(s.name), ''))
        else:
            self.out.write('%s%*s ('
                           %(s.name, max_signal_len - len(s.name), ''))
        count = 0
        for a in s.args:
            if (count > 0):
                self.out.write(',\n%*s'%(max_signal_len + 2, ''))
            self.out.write('%s%*s %s'%(a.signature, max_signature_len - len(a.signature), '', a.name))
            count = count + 1
        self.out.write(');\n')
    def print_property_prototype(self, i, p, in_synopsis):
        """Write the prototype line of property p (interface i) to self.out.

        Synopsis mode aligns columns across all properties and links the
        name. The access strings are space-padded to equal width.
        """
        max_property_len = 0
        if in_synopsis:
            for _p in i.properties:
                max_property_len = max(len(_p.name), max_property_len)
        else:
            max_property_len = max(len(p.name), max_property_len)
        max_signature_len = 0
        if in_synopsis:
            for _p in i.properties:
                max_signature_len = max(len(_p.signature), max_signature_len)
        else:
            max_signature_len = max(len(p.signature), max_signature_len)
        if in_synopsis:
            self.out.write('<link linkend="gdbus-property-%s.%s">%s</link>%*s'
                           %(utils.dots_to_hyphens(i.name), p.name, p.name, max_property_len - len(p.name), ''))
        else:
            self.out.write('%s%*s'
                           %(p.name, max_property_len - len(p.name), ''))
        # Trailing spaces keep the three access labels the same width.
        if p.readable and p.writable:
            access = 'readwrite'
        elif p.readable:
            access = 'readable '
        else:
            access = 'writable '
        self.out.write(' %s %s\n'%(access, p.signature))
def print_synopsis_methods(self, i):
self.out.write(' <refsynopsisdiv role="synopsis">\n'%())
self.out.write(' <title role="synopsis.title">Methods</title>\n'%())
self.out.write(' <synopsis>\n'%())
for m in i.methods:
self.print_method_prototype(i, m, in_synopsis=True)
self.out.write('</synopsis>\n'%())
self.out.write(' </refsynopsisdiv>\n'%())
def print_synopsis_signals(self, i):
self.out.write(' <refsect1 role="signal_proto">\n'%())
self.out.write(' <title role="signal_proto.title">Signals</title>\n'%())
self.out.write(' <synopsis>\n'%())
for s in i.signals:
self.print_signal_prototype(i, s, in_synopsis=True)
self.out.write('</synopsis>\n'%())
self.out.write(' </refsect1>\n'%())
def print_synopsis_properties(self, i):
self.out.write(' <refsect1 role="properties">\n'%())
self.out.write(' <title role="properties.title">Properties</title>\n'%())
self.out.write(' <synopsis>\n'%())
for p in i.properties:
self.print_property_prototype(i, p, in_synopsis=True)
self.out.write('</synopsis>\n'%())
self.out.write(' </refsect1>\n'%())
    def print_method(self, i, m):
        """Emit the full DocBook refsect2 for method m of interface i:
        title, index entry, prototype, description, per-argument list, and
        Since/deprecation notes."""
        self.out.write('<refsect2 role="method" id="gdbus-method-%s.%s">\n'%(utils.dots_to_hyphens(i.name), m.name))
        self.out.write(' <title>The %s() method</title>\n'%(m.name))
        self.out.write(' <indexterm zone="gdbus-method-%s.%s"><primary sortas="%s.%s">%s.%s()</primary></indexterm>\n'%(utils.dots_to_hyphens(i.name), m.name, i.name_without_prefix, m.name, i.name, m.name))
        self.out.write('<programlisting>\n')
        self.print_method_prototype(i, m, in_synopsis=False)
        self.out.write('</programlisting>\n')
        self.out.write('<para>%s</para>\n'%(self.expand(m.doc_string, True)))
        self.out.write('<variablelist role="params">\n')
        for a in m.in_args:
            self.out.write('<varlistentry>\n'%())
            self.out.write(' <term><literal>IN %s <parameter>%s</parameter></literal>:</term>\n'%(a.signature, a.name))
            self.out.write(' <listitem><para>%s</para></listitem>\n'%(self.expand(a.doc_string, True)))
            self.out.write('</varlistentry>\n'%())
        for a in m.out_args:
            self.out.write('<varlistentry>\n'%())
            self.out.write(' <term><literal>OUT %s <parameter>%s</parameter></literal>:</term>\n'%(a.signature, a.name))
            self.out.write(' <listitem><para>%s</para></listitem>\n'%(self.expand(a.doc_string, True)))
            self.out.write('</varlistentry>\n'%())
        self.out.write('</variablelist>\n')
        if len(m.since) > 0:
            self.out.write('<para role="since">Since %s</para>\n'%(m.since))
        if m.deprecated:
            self.out.write('<warning><para>The %s() method is deprecated.</para></warning>'%(m.name))
        self.out.write('</refsect2>\n')
    def print_signal(self, i, s):
        """Emit the full DocBook refsect2 for signal s of interface i."""
        self.out.write('<refsect2 role="signal" id="gdbus-signal-%s.%s">\n'%(utils.dots_to_hyphens(i.name), s.name))
        self.out.write(' <title>The "%s" signal</title>\n'%(s.name))
        self.out.write(' <indexterm zone="gdbus-signal-%s.%s"><primary sortas="%s::%s">%s::%s</primary></indexterm>\n'%(utils.dots_to_hyphens(i.name), s.name, i.name_without_prefix, s.name, i.name, s.name))
        self.out.write('<programlisting>\n')
        self.print_signal_prototype(i, s, in_synopsis=False)
        self.out.write('</programlisting>\n')
        self.out.write('<para>%s</para>\n'%(self.expand(s.doc_string, True)))
        self.out.write('<variablelist role="params">\n')
        for a in s.args:
            self.out.write('<varlistentry>\n'%())
            self.out.write(' <term><literal>%s <parameter>%s</parameter></literal>:</term>\n'%(a.signature, a.name))
            self.out.write(' <listitem><para>%s</para></listitem>\n'%(self.expand(a.doc_string, True)))
            self.out.write('</varlistentry>\n'%())
        self.out.write('</variablelist>\n')
        if len(s.since) > 0:
            self.out.write('<para role="since">Since %s</para>\n'%(s.since))
        if s.deprecated:
            self.out.write('<warning><para>The "%s" signal is deprecated.</para></warning>'%(s.name))
        self.out.write('</refsect2>\n')
    def print_property(self, i, p):
        """Emit the full DocBook refsect2 for property p of interface i."""
        self.out.write('<refsect2 role="property" id="gdbus-property-%s.%s">\n'%(utils.dots_to_hyphens(i.name), p.name))
        self.out.write(' <title>The "%s" property</title>\n'%(p.name))
        self.out.write(' <indexterm zone="gdbus-property-%s.%s"><primary sortas="%s:%s">%s:%s</primary></indexterm>\n'%(utils.dots_to_hyphens(i.name), p.name, i.name_without_prefix, p.name, i.name, p.name))
        self.out.write('<programlisting>\n')
        self.print_property_prototype(i, p, in_synopsis=False)
        self.out.write('</programlisting>\n')
        self.out.write('<para>%s</para>\n'%(self.expand(p.doc_string, True)))
        if len(p.since) > 0:
            self.out.write('<para role="since">Since %s</para>\n'%(p.since))
        if p.deprecated:
            self.out.write('<warning><para>The "%s" property is deprecated.</para></warning>'%(p.name))
        self.out.write('</refsect2>\n')
def expand(self, s, expandParamsAndConstants):
for key in self.expand_member_dict_keys:
s = s.replace(key, self.expand_member_dict[key])
for key in self.expand_iface_dict_keys:
s = s.replace(key, self.expand_iface_dict[key])
if expandParamsAndConstants:
# replace @foo with <parameter>foo</parameter>
s = re.sub('@[a-zA-Z0-9_]*', lambda m: '<parameter>' + m.group(0)[1:] + '</parameter>', s)
# replace e.g. %TRUE with <constant>TRUE</constant>
s = re.sub('%[a-zA-Z0-9_]*', lambda m: '<constant>' + m.group(0)[1:] + '</constant>', s)
return s
def generate_expand_dicts(self):
self.expand_member_dict = {}
self.expand_iface_dict = {}
for i in self.ifaces:
key = '#%s'%(i.name)
value = '<link linkend="gdbus-interface-%s.top_of_page">%s</link>'%(utils.dots_to_hyphens(i.name), i.name)
self.expand_iface_dict[key] = value
for m in i.methods:
key = '%s.%s()'%(i.name, m.name)
value = '<link linkend="gdbus-method-%s.%s">%s()</link>'%(utils.dots_to_hyphens(i.name), m.name, m.name)
self.expand_member_dict[key] = value
for s in i.signals:
key = '#%s::%s'%(i.name, s.name)
value = '<link linkend="gdbus-signal-%s.%s">"%s"</link>'%(utils.dots_to_hyphens(i.name), s.name, s.name)
self.expand_member_dict[key] = value
for p in i.properties:
key = '#%s:%s'%(i.name, p.name)
value = '<link linkend="gdbus-property-%s.%s">"%s"</link>'%(utils.dots_to_hyphens(i.name), p.name, p.name)
self.expand_member_dict[key] = value
# Make sure to expand the keys in reverse order so e.g. #org.foo.Iface:MediaCompat
# is evaluated before #org.foo.Iface:Media ...
self.expand_member_dict_keys = self.expand_member_dict.keys()
self.expand_member_dict_keys.sort(reverse=True)
self.expand_iface_dict_keys = self.expand_iface_dict.keys()
self.expand_iface_dict_keys.sort(reverse=True)
def generate(self):
for i in self.ifaces:
self.out = file('%s-%s.xml'%(self.docbook, i.name), 'w')
self.out.write(''%())
self.out.write('<?xml version="1.0" encoding="utf-8"?>\n'%())
self.out.write('<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"\n'%())
self.out.write(' "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [\n'%())
self.out.write(']>\n'%())
self.out.write('<refentry id="gdbus-%s">\n'%(i.name))
self.out.write(' <refmeta>'%())
self.out.write(' <refentrytitle role="top_of_page" id="gdbus-interface-%s.top_of_page">%s</refentrytitle>\n'%(utils.dots_to_hyphens(i.name), i.name))
self.out.write(' <indexterm zone="gdbus-interface-%s.top_of_page"><primary sortas="%s">%s</primary></indexterm>\n'%(utils.dots_to_hyphens(i.name), i.name_without_prefix, i.name))
self.out.write(' </refmeta>'%())
self.out.write(' <refnamediv>'%())
self.out.write(' <refname>%s</refname>'%(i.name))
self.out.write(' <refpurpose>%s</refpurpose>'%(i.doc_string_brief))
self.out.write(' </refnamediv>'%())
if len(i.methods) > 0:
self.print_synopsis_methods(i)
if len(i.signals) > 0:
self.print_synopsis_signals(i)
if len(i.properties) > 0:
self.print_synopsis_properties(i)
self.out.write('<refsect1 role="desc" id="gdbus-interface-%s">\n'%(utils.dots_to_hyphens(i.name)))
self.out.write(' <title role="desc.title">Description</title>\n'%())
self.out.write(' <para>%s</para>\n'%(self.expand(i.doc_string, True)))
if len(i.since) > 0:
self.out.write(' <para role="since">Since %s</para>\n'%(i.since))
if i.deprecated:
self.out.write('<warning><para>The %s interface is deprecated.</para></warning>'%(i.name))
self.out.write('</refsect1>\n'%())
if len(i.methods) > 0:
self.out.write('<refsect1 role="details" id="gdbus-methods-%s">\n'%(i.name))
self.out.write(' <title role="details.title">Method Details</title>\n'%())
for m in i.methods:
self.print_method(i, m)
self.out.write('</refsect1>\n'%())
if len(i.signals) > 0:
self.out.write('<refsect1 role="details" id="gdbus-signals-%s">\n'%(i.name))
self.out.write(' <title role="details.title">Signal Details</title>\n'%())
for s in i.signals:
self.print_signal(i, s)
self.out.write('</refsect1>\n'%())
if len(i.properties) > 0:
self.out.write('<refsect1 role="details" id="gdbus-properties-%s">\n'%(i.name))
self.out.write(' <title role="details.title">Property Details</title>\n'%())
for s in i.properties:
self.print_property(i, s)
self.out.write('</refsect1>\n'%())
self.out.write('</refentry>\n')
self.out.write('\n')
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.