code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Virtual file system for managing files locally or in the cloud."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import jinja2
class AbstractReadOnlyFileSystem(object):
    """A generic ro file system interface that forwards to an implementation.

    Every call is delegated verbatim to the wrapped implementation object,
    which must expose the same four methods.
    """

    def __init__(self, impl):
        # The concrete file system that actually performs each operation.
        self._impl = impl

    def isfile(self, filename):
        """Checks if file exists, similar to os.path.isfile(...)."""
        return self._impl.isfile(filename)

    def open(self, filename):
        """Returns a stream with the file content, similar to open(...)."""
        return self._impl.open(filename)

    def list(self, dir_name):
        """Lists all files in a directory."""
        return self._impl.list(dir_name)

    def get_jinja_environ(self):
        """Configures jinja environment loaders for this file system."""
        return self._impl.get_jinja_environ()
class LocalReadOnlyFileSystem(object):
    """A ro file system serving only local files."""

    def __init__(self, logical_home_folder=None, physical_home_folder=None):
        """Create a new instance of the object.

        Args:
            logical_home_folder: A logical home dir of all files (/a/b/c/...).
            physical_home_folder: A physical location on the file system (/x/y).

        Returns:
            A new instance of the object.
        """
        self._logical_home_folder = logical_home_folder
        self._physical_home_folder = physical_home_folder

    def _remap(self, filename, from_folder, to_folder):
        # Re-bases filename from one home folder onto the other. Names pass
        # through untouched unless both home folders were configured.
        if not (self._logical_home_folder and self._physical_home_folder):
            return filename
        return os.path.join(to_folder, os.path.relpath(filename, from_folder))

    def _logical_to_physical(self, filename):
        return self._remap(
            filename, self._logical_home_folder, self._physical_home_folder)

    def _physical_to_logical(self, filename):
        return self._remap(
            filename, self._physical_home_folder, self._logical_home_folder)

    def isfile(self, filename):
        return os.path.isfile(self._logical_to_physical(filename))

    def open(self, filename):
        return open(self._logical_to_physical(filename), 'rb')

    def list(self, root_dir):
        """Lists all files in a directory."""
        files = []
        for dirname, unused_dirnames, filenames in os.walk(
                self._logical_to_physical(root_dir)):
            files.extend(
                self._physical_to_logical(os.path.join(dirname, name))
                for name in filenames)
        return sorted(files)

    def get_jinja_environ(self, dir_names):
        physical_dir_names = [
            self._logical_to_physical(dir_name) for dir_name in dir_names]
        return jinja2.Environment(
            extensions=['jinja2.ext.i18n'],
            loader=jinja2.FileSystemLoader(physical_dir_names))
def run_all_unit_tests():
    """Runs all unit tests in the project."""
    # NOTE(review): placeholder -- no tests are wired up in this module yet.


if __name__ == '__main__':
    run_all_unit_tests()
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enforces schema and verifies course files for referential integrity.
Use this script to verify referential integrity of your course definition files
before you import them into the production instance of Google AppEngine.
Here is how to use the script:
- prepare your course files
- edit the data/unit.csv file
- edit the data/lesson.csv file
- edit the assets/js/activity-*.*.js files
- edit the assets/js/assessment-*.js files
- run the script from a command line by navigating to the root
directory of the app and then typing "python tools/verify.py"
- review the report printed to the console for errors and warnings
Good luck!
"""
import csv
import json
import os
import re
import sys
# Schema type markers. Each is a unique sentinel object compared by identity
# ('==' on these falls back to identity), so they can never collide with real
# values appearing in course data.
BOOLEAN = object()
STRING = object()
FLOAT = object()
INTEGER = object()
CORRECT = object()
REGEX = object()

# The grammar that assessment and activity JavaScript files must follow.
# Dicts describe {...} objects, lists describe [...] arrays; a doubly-nested
# list (e.g. 'choices' for 'multiple choice') demands elements in that exact
# order. Plain strings like 'multiple choice' act as selectors that pick one
# alternative dict among several.
SCHEMA = {
    'assessment': {
        'assessmentName': STRING,
        'preamble': STRING,
        'checkAnswers': BOOLEAN,
        'questionsList': [{
            'questionHTML': STRING,
            'lesson': STRING,
            'choices': [STRING, CORRECT],
            'correctAnswerNumeric': FLOAT,
            'correctAnswerString': STRING,
            'correctAnswerRegex': REGEX}]
    }, 'activity': [
        STRING,
        {
            'questionType': 'multiple choice',
            'choices': [[STRING, BOOLEAN, STRING]]
        }, {
            'questionType': 'multiple choice group',
            'questionsList': [{
                'questionHTML': STRING,
                'choices': [STRING],
                'correctIndex': INTEGER}],
            'allCorrectOutput': STRING,
            'someIncorrectOutput': STRING
        }, {
            'questionType': 'freetext',
            'correctAnswerRegex': REGEX,
            'correctAnswerOutput': STRING,
            'incorrectAnswerOutput': STRING,
            'showAnswerOutput': STRING,
            'showAnswerPrompt': STRING,
            'outputHeight': STRING
        }]}

# Expected header rows of data/unit.csv and data/lesson.csv respectively.
UNITS_HEADER = (
    'id,type,unit_id,title,release_date,now_available')
LESSONS_HEADER = (
    'unit_id,unit_title,lesson_id,lesson_title,lesson_activity,'
    'lesson_activity_name,lesson_notes,lesson_video_id,lesson_objectives')

# Maps a CSV column name to (datastore property name, converter). 'unicode'
# is the Python 2 text type.
UNIT_CSV_TO_DB_CONVERTER = {
    'id': ('id', int),
    'type': ('type', unicode),
    'unit_id': ('unit_id', unicode),
    'title': ('title', unicode),
    'release_date': ('release_date', unicode),
    # NOTE(review): bool('False') is True -- confirm upstream code never
    # feeds this converter the literal strings 'True'/'False'.
    'now_available': ('now_available', bool)
}
LESSON_CSV_TO_DB_CONVERTER = {
    'unit_id': ('unit_id', int),
    # Field 'unit_title' is a duplicate of Unit.title. We enforce that both
    # values are the same and ignore this value altogether.
    'unit_title': None,
    'lesson_id': ('id', int),
    'lesson_title': ('title', unicode),
    'lesson_activity': ('activity', unicode),
    'lesson_activity_name': ('activity_title', unicode),
    'lesson_video_id': ('video', unicode),
    'lesson_objectives': ('objectives', unicode),
    'lesson_notes': ('notes', unicode)
}

# Regex fragments marking a region the verifier must skip; see
# parse_content_marked_no_verify().
# pylint: disable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_OPEN = '<gcb-no-verify>\s*\n'
# pylint: enable-msg=anomalous-backslash-in-string
NO_VERIFY_TAG_NAME_CLOSE = '</gcb-no-verify>'

# Verbosity switches for Verifier.fine() / Verifier.info() style logging.
OUTPUT_FINE_LOG = False
OUTPUT_DEBUG_LOG = False
class Term(object):
    """A schema term (REGEX/CORRECT/BOOLEAN marker) with an optional value."""

    def __init__(self, term_type, value=None):
        self.term_type = term_type
        self.value = value

    def __eq__(self, other):
        if type(other) is not Term:
            return False
        else:
            return ((self.term_type == other.term_type) and
                    (self.value == other.value))

    def __ne__(self, other):
        # Bug fix: without __ne__, Python 2 falls back to identity for '!=',
        # so two equal Terms would still compare unequal under '!='.
        return not self.__eq__(other)
class SchemaException(Exception):
    """A class to represent a schema error."""

    def format_primitive_value_name(self, name):
        # Renders a value for an error message; schema sentinels print as
        # their symbolic names, anything else passes through unchanged.
        if name == REGEX:
            return 'REGEX(...)'
        if name == CORRECT:
            return 'CORRECT(...)'
        if name == BOOLEAN:
            return 'BOOLEAN'
        return name

    def format_primitive_type_name(self, name):
        """Formats a name for a primitive type."""
        if name == BOOLEAN:
            return 'BOOLEAN'
        if name == REGEX:
            return 'REGEX(...)'
        if name == CORRECT:
            return 'CORRECT(...)'
        if name == STRING or isinstance(name, str):
            return 'STRING'
        if name == FLOAT:
            return 'FLOAT'
        if name == INTEGER:
            return 'INTEGER'
        if isinstance(name, dict):
            return '{...}'
        if isinstance(name, list):
            return '[...]'
        return 'Unknown type name \'%s\'' % name.__class__.__name__

    def format_type_names(self, names):
        # A list of types formats to a list of captions; a single type
        # formats to a single caption.
        if isinstance(names, list):
            captions = []
            for name in names:
                captions.append(self.format_primitive_type_name(name))
            return captions
        else:
            return self.format_primitive_type_name(names)

    def __init__(self, message, value=None, types=None, path=None):
        # Builds the final message by '%'-substituting the formatted value
        # and/or type captions into the placeholders of 'message'.
        # NOTE(review): 'if value:' skips formatting for falsy values such
        # as 0 or '' -- confirm that is intended before changing.
        prefix = ''
        if path:
            prefix = 'Error at %s\n' % path
        if types:
            if value:
                message = prefix + message % (
                    self.format_primitive_value_name(value),
                    self.format_type_names(types))
            else:
                message = prefix + message % self.format_type_names(types)
        else:
            if value:
                message = prefix + (
                    message % self.format_primitive_value_name(value))
            else:
                message = prefix + message
        super(SchemaException, self).__init__(message)
class Context(object):
    """"A class that manages a stack of traversal contexts."""

    def __init__(self):
        self.parent = None
        self.path = ['/']

    def new(self, names):
        """"Derives a new context from the current one."""
        child = Context()
        child.parent = self
        child.path = list(self.path)
        if names:
            segments = names if isinstance(names, list) else [names]
            for segment in segments:
                # Skip falsy segments (None, '') so they leave no trace.
                if segment:
                    child.path.append('/%s' % segment)
        return child

    def format_path(self):
        """Formats the canonical name of this context."""
        return ''.join(self.path)
class SchemaHelper(object):
    """A class that knows how to apply the schema."""

    def __init__(self):
        # Per-type visit counters, incremented by visit_element().
        self.type_stats = {}
        # NOTE(review): self.parse_log is first assigned in
        # check_instances_match_schema(); visit_element() assumes that
        # method has already run.

    def visit_element(self, atype, value, context, is_terminal=True):
        """Callback for each schema element being traversed."""
        if atype in self.type_stats:
            count = self.type_stats[atype]
        else:
            count = 0
        self.type_stats[atype] = count + 1
        if is_terminal:
            self.parse_log.append(' TERMINAL: %s %s = %s' % (
                atype, context.format_path(), value))
        else:
            self.parse_log.append(' NON-TERMINAL: %s %s' % (
                atype, context.format_path()))

    def extract_all_terms_to_depth(self, key, values, type_map):
        """Walks schema type map recursively to depth."""
        # Walks schema type map recursively to depth and creates a list of all
        # possible {key: value} pairs. The latter is a list of all non-terminal
        # and terminal terms allowed in the schema. The list of terms from this
        # method can be bound to an execution context for evaluating whether a
        # given instance's map complies with the schema.
        if key:
            type_map.update({key: key})
        if values == REGEX:
            # regex(...) in an evaluated script produces a REGEX Term.
            type_map.update({'regex': lambda x: Term(REGEX, x)})
            return
        if values == CORRECT:
            type_map.update({'correct': lambda x: Term(CORRECT, x)})
            return
        if values == BOOLEAN:
            type_map.update(
                {'true': Term(BOOLEAN, True), 'false': Term(BOOLEAN, False)})
            return
        if values == STRING or values == INTEGER:
            # Plain strings and integers need no extra vocabulary.
            return
        if isinstance(values, dict):
            for new_key, new_value in values.items():
                self.extract_all_terms_to_depth(new_key, new_value, type_map)
            return
        if isinstance(values, list):
            for new_value in values:
                self.extract_all_terms_to_depth(None, new_value, type_map)
            return

    def find_selectors(self, type_map):
        """Finds all type selectors."""
        # Finds all elements in the type map where both a key and a value are
        # strings. These elements are used to find one specific type map among
        # several alternative type maps.
        selector = {}
        for akey, avalue in type_map.items():
            if isinstance(akey, str) and isinstance(avalue, str):
                selector.update({akey: avalue})
        return selector

    def find_compatible_dict(self, value_map, type_map, unused_context):
        """Find the type map most compatible with the value map."""
        # A value map is considered compatible with a type map when former
        # contains the same key names and the value types as the type map.

        # special case when we have just one type; check name and type are the
        # same
        if len(type_map) == 1:
            for value_key in value_map.keys():
                for key in type_map[0].keys():
                    if value_key == key:
                        return key, type_map[0]
            raise SchemaException(
                "Expected: '%s'\nfound: %s", type_map[0].keys()[0], value_map)

        # case when we have several types to choose from
        for adict in type_map:
            dict_selector = self.find_selectors(adict)
            for akey, avalue in dict_selector.items():
                if value_map[akey] == avalue:
                    return akey, adict
        return None, None

    def check_single_value_matches_type(self, value, atype, context):
        """Checks if a single value matches a specific (primitive) type."""
        if atype == BOOLEAN:
            # Either a native bool, or a Term produced by 'true'/'false'.
            if isinstance(value, bool) or value.term_type == BOOLEAN:
                self.visit_element('BOOLEAN', value, context)
                return True
            else:
                raise SchemaException(
                    'Expected: \'true\' or \'false\'\nfound: %s', value)
        if isinstance(atype, str):
            # A literal string in the schema (a selector value) matches any
            # string instance.
            if isinstance(value, str):
                self.visit_element('str', value, context)
                return True
            else:
                raise SchemaException('Expected: \'string\'\nfound: %s', value)
        if atype == STRING:
            if isinstance(value, str):
                self.visit_element('STRING', value, context)
                return True
            else:
                raise SchemaException('Expected: \'string\'\nfound: %s', value)
        if atype == REGEX and value.term_type == REGEX:
            self.visit_element('REGEX', value, context)
            return True
        if atype == CORRECT and value.term_type == CORRECT:
            self.visit_element('CORRECT', value, context)
            return True
        if atype == FLOAT:
            if is_number(value):
                self.visit_element('NUMBER', value, context)
                return True
            else:
                raise SchemaException('Expected: \'number\'\nfound: %s', value)
        if atype == INTEGER:
            if is_integer(value):
                self.visit_element('INTEGER', value, context)
                return True
            else:
                raise SchemaException(
                    'Expected: \'integer\'\nfound: %s', value,
                    path=context.format_path())
        raise SchemaException(
            'Unexpected value \'%s\'\n'
            'for type %s', value, atype, path=context.format_path())

    def check_value_list_matches_type(self, value, atype, context):
        """Checks if all items in value list match a specific type."""
        for value_item in value:
            found = False
            for atype_item in atype:
                # A nested list of types is flattened one level before
                # matching.
                if isinstance(atype_item, list):
                    for atype_item_item in atype_item:
                        if self.does_value_match_type(
                                value_item, atype_item_item, context):
                            found = True
                            break
                else:
                    if self.does_value_match_type(
                            value_item, atype_item, context):
                        found = True
                        break
            if not found:
                raise SchemaException(
                    'Expected: \'%s\'\nfound: %s', atype, value)
        return True

    def check_value_matches_type(self, value, atype, context):
        """Checks if single value or a list of values match a specific type."""
        if isinstance(atype, list) and isinstance(value, list):
            return self.check_value_list_matches_type(value, atype, context)
        else:
            return self.check_single_value_matches_type(value, atype, context)

    def does_value_match_type(self, value, atype, context):
        """Same as other method, but does not throw an exception."""
        try:
            return self.check_value_matches_type(value, atype, context)
        except SchemaException:
            return False

    def does_value_match_one_of_types(self, value, types, context):
        """Checks if a value matches to one of the types in the list."""
        type_names = None
        if isinstance(types, list):
            type_names = types
        if type_names:
            for i in range(0, len(type_names)):
                if self.does_value_match_type(value, type_names[i], context):
                    return True
        return False

    def does_value_match_map_of_type(self, value, types, context):
        """Checks if value matches any variation of {...} type."""
        # find all possible map types
        maps = []
        for atype in types:
            if isinstance(atype, dict):
                maps.append(atype)
        if not maps and isinstance(types, dict):
            maps.append(types)

        # check if the structure of value matches one of the maps
        if isinstance(value, dict):
            aname, adict = self.find_compatible_dict(value, maps, context)
            if adict:
                self.visit_element('dict', value, context.new(aname), False)
                for akey, avalue in value.items():
                    if akey not in adict:
                        raise SchemaException(
                            'Unknown term \'%s\'', akey,
                            path=context.format_path())
                    self.check_value_of_valid_type(
                        avalue, adict[akey], context.new([aname, akey]))
                return True
            raise SchemaException(
                'The value:\n %s\n'
                'is incompatible with expected type(s):\n %s',
                value, types, path=context.format_path())
        return False

    def format_name_with_index(self, alist, aindex):
        """A function to format a context name with an array element index."""
        # Single-element lists omit the index for a cleaner path.
        if len(alist) == 1:
            return ''
        else:
            return '[%s]' % aindex

    def does_value_match_list_of_types_in_order(
            self, value, types, context, target):
        """Iterates the value and types in given order and checks for match."""
        all_values_are_lists = True
        for avalue in value:
            if not isinstance(avalue, list):
                all_values_are_lists = False

        if all_values_are_lists:
            # Each sub-list is matched recursively against the full spec.
            for i in range(0, len(value)):
                self.check_value_of_valid_type(value[i], types, context.new(
                    self.format_name_with_index(value, i)), True)
        else:
            if len(target) != len(value):
                # NOTE(review): '%' binds tighter than '+', so only the
                # second literal is formatted here -- confirm the intended
                # message before changing.
                raise SchemaException(
                    'Expected: \'%s\' values\n' + 'found: %s.' % value,
                    len(target), path=context.format_path())
            for i in range(0, len(value)):
                self.check_value_of_valid_type(value[i], target[i], context.new(
                    self.format_name_with_index(value, i)))
        return True

    def does_value_match_list_of_types_any_order(self, value, types,
                                                 context, lists):
        """Iterates the value and types, checks if they match in any order."""
        target = lists
        if not target:
            if not isinstance(types, list):
                raise SchemaException(
                    'Unsupported type %s',
                    None, types, path=context.format_path())
            target = types

        for i in range(0, len(value)):
            found = False
            for atarget in target:
                try:
                    self.check_value_of_valid_type(
                        value[i], atarget,
                        context.new(self.format_name_with_index(value, i)))
                    found = True
                    break
                except SchemaException as unused_e:
                    # Mismatch with this alternative; try the next one.
                    continue
            if not found:
                raise SchemaException(
                    'The value:\n %s\n'
                    'is incompatible with expected type(s):\n %s',
                    value, types, path=context.format_path())
        return True

    def does_value_match_list_of_type(self, value, types, context, in_order):
        """Checks if a value matches a variation of [...] type."""
        # Extra argument controls whether matching must be done in a specific
        # or in any order. A specific order is demanded by [[...]]] construct,
        # i.e. [[STRING, INTEGER, BOOLEAN]], while sub elements inside {...} and
        # [...] can be matched in any order.

        # prepare a list of list types
        lists = []
        for atype in types:
            if isinstance(atype, list):
                lists.append(atype)
        if len(lists) > 1:
            raise SchemaException(
                'Unable to validate types with multiple alternative '
                'lists %s', None, types, path=context.format_path())

        if isinstance(value, list):
            # NOTE(review): this check is unreachable -- the same condition
            # raised above already; confirm before removing.
            if len(lists) > 1:
                raise SchemaException(
                    'Allowed at most one list\nfound: %s.',
                    None, types, path=context.format_path())

            # determine if list is in order or not as hinted by double array
            # [[..]]; [STRING, NUMBER] is in any order, but [[STRING, NUMBER]]
            # demands order
            ordered = len(lists) == 1 and isinstance(types, list)
            if in_order or ordered:
                return self.does_value_match_list_of_types_in_order(
                    value, types, context, lists[0])
            else:
                return self.does_value_match_list_of_types_any_order(
                    value, types, context, lists)
        return False

    def check_value_of_valid_type(self, value, types, context, in_order=None):
        """Check if a value matches any of the given types."""
        if not (isinstance(types, list) or isinstance(types, dict)):
            self.check_value_matches_type(value, types, context)
            return
        # Try each container interpretation in turn; first success wins.
        if (self.does_value_match_list_of_type(value, types,
                                               context, in_order) or
            self.does_value_match_map_of_type(value, types, context) or
            self.does_value_match_one_of_types(value, types, context)):
            return
        raise SchemaException(
            'Unknown type %s', value, path=context.format_path())

    def check_instances_match_schema(self, values, types, name):
        """Recursively decompose 'values' to see if they match schema types."""
        self.parse_log = []
        context = Context().new(name)
        self.parse_log.append(' ROOT %s' % context.format_path())
        # pylint: disable-msg=protected-access
        values_class = values.__class__
        # pylint: enable-msg=protected-access

        # handle {..} containers
        if isinstance(types, dict):
            if not isinstance(values, dict):
                raise SchemaException(
                    'Error at \'/\': expected {...}, found %s' % (
                        values_class.__name__))
            self.check_value_of_valid_type(values, types, context.new([]))
            return

        # handle [...] containers
        if isinstance(types, list):
            if not isinstance(values, list):
                raise SchemaException(
                    'Error at \'/\': expected [...], found %s' % (
                        values_class.__name__))
            for i in range(0, len(values)):
                self.check_value_of_valid_type(
                    values[i], types, context.new('[%s]' % i))
            return

        raise SchemaException(
            'Expected an array or a dictionary.', None,
            path=context.format_path())
def escape_quote(value):
    # Coerces value to text (Python 2 'unicode') and backslash-escapes single
    # quotes so it can be embedded in a single-quoted JavaScript string.
    return unicode(value).replace('\'', r'\'')
class Unit(object):
    """A class to represent a Unit."""

    def __init__(self):
        self.id = 0
        self.type = ''
        self.unit_id = ''
        self.title = ''
        self.release_date = ''
        self.now_available = False

    def list_properties(self, name, output):
        """Outputs all properties of the unit."""
        output.append('%s[\'id\'] = %s;' % (name, self.id))
        # Quoted string properties, emitted in a fixed order.
        for key, value in [
                ('type', self.type),
                ('unit_id', self.unit_id),
                ('title', self.title),
                ('release_date', self.release_date)]:
            output.append('%s[\'%s\'] = \'%s\';' % (
                name, key, escape_quote(value)))
        output.append('%s[\'now_available\'] = %s;' % (
            name, str(self.now_available).lower()))
class Lesson(object):
    """A class to represent a Lesson."""

    def __init__(self):
        self.unit_id = 0
        self.unit_title = ''
        self.lesson_id = 0
        self.lesson_title = ''
        self.lesson_activity = ''
        self.lesson_activity_name = ''
        self.lesson_notes = ''
        self.lesson_video_id = ''
        self.lesson_objectives = ''

    def list_properties(self, name, output):
        """Outputs all properties of the lesson."""
        activity = 'true' if self.lesson_activity == 'yes' else 'false'
        output.append('%s[\'unit_id\'] = %s;' % (name, self.unit_id))
        output.append('%s[\'unit_title\'] = \'%s\';' % (
            name, escape_quote(self.unit_title)))
        output.append('%s[\'lesson_id\'] = %s;' % (name, self.lesson_id))
        output.append('%s[\'lesson_title\'] = \'%s\';' % (
            name, escape_quote(self.lesson_title)))
        output.append('%s[\'lesson_activity\'] = %s;' % (name, activity))
        # Remaining quoted string properties, emitted in a fixed order.
        for key, value in [
                ('lesson_activity_name', self.lesson_activity_name),
                ('lesson_notes', self.lesson_notes),
                ('lesson_video_id', self.lesson_video_id),
                ('lesson_objectives', self.lesson_objectives)]:
            output.append('%s[\'%s\'] = \'%s\';' % (
                name, key, escape_quote(value)))

    def to_id_string(self):
        """Formats 'unit_id.lesson_id.lesson_title' for log messages."""
        return '%s.%s.%s' % (self.unit_id, self.lesson_id, self.lesson_title)
class Assessment(object):
    """A class to represent a Assessment."""

    def __init__(self):
        # Execution scope for evaluating assessment-*.js files: holds every
        # term name allowed by SCHEMA['assessment'], so the converted script
        # can be exec'ed with only schema vocabulary in scope.
        self.scope = {}
        SchemaHelper().extract_all_terms_to_depth(
            'assessment', SCHEMA['assessment'], self.scope)
class Activity(object):
    """A class to represent a Activity."""

    def __init__(self):
        # Execution scope for evaluating activity-*.js files: holds every
        # term name allowed by SCHEMA['activity'].
        self.scope = {}
        SchemaHelper().extract_all_terms_to_depth(
            'activity', SCHEMA['activity'], self.scope)
def silent_echo(unused_message):
    """Discards the message; used when progress output is not wanted."""
    pass
def echo(message):
    # Writes one progress/report line to stdout (Python 2 print statement).
    print message
def is_integer(s):
    """Checks if s (usually a CSV cell string) represents a whole number."""
    try:
        return int(s) == float(s)
    except (ValueError, TypeError):
        # TypeError covers inputs like None that int()/float() reject with
        # a TypeError rather than a ValueError; the original let it escape.
        return False
def is_boolean(s):
    """Checks if s is the literal string 'True' or 'False'."""
    # The original wrapped this in try/except ValueError, but '==' never
    # raises ValueError -- the handler was dead code.
    return s == 'True' or s == 'False'
def is_number(s):
    """Checks if s can be parsed as a float."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def is_one_of(value, values):
    """Checks whether value equals (==) any element of values."""
    return any(value == current for current in values)
def text_to_line_numbered_text(text):
    """Adds line numbers to the provided text."""
    numbered = [
        '%s: %s' % (index, line)
        for index, line in enumerate(text.split('\n'), 1)]
    # Continuation lines are indented by one space under the first line.
    return '\n '.join(numbered)
def set_object_attributes(target_object, names, values):
    """Sets object attributes from provided values.

    Args:
        target_object: the object whose attributes are assigned.
        names: list of attribute names, parallel to values.
        values: list of (usually string) values read from a CSV row.

    Raises:
        SchemaException: if names and values differ in length.
    """
    if len(names) != len(values):
        raise SchemaException(
            'The number of elements must match: %s and %s' % (names, values))
    for i in range(0, len(names)):
        if is_integer(values[i]):
            # if we are setting an attribute of an object that support
            # metadata, try to infer the target type and convert 'int' into
            # 'str' here
            target_type = None
            if hasattr(target_object.__class__, names[i]):
                attribute = getattr(target_object.__class__, names[i])
                if hasattr(attribute, 'data_type'):
                    target_type = attribute.data_type.__name__
            if target_type and (target_type == 'str' or
                                target_type == 'basestring'):
                setattr(target_object, names[i], str(values[i]))
            else:
                setattr(target_object, names[i], int(values[i]))
            continue
        if is_boolean(values[i]):
            # Bug fix: the original used bool(values[i]), but bool('False')
            # is True since any non-empty string is truthy. Compare against
            # the literal instead so 'False' is stored as False.
            setattr(target_object, names[i], values[i] == 'True')
            continue
        setattr(target_object, names[i], values[i])
def read_objects_from_csv_stream(stream, header, new_object):
    # Wraps the stream in a csv reader and delegates to the row parser.
    return read_objects_from_csv(csv.reader(stream), header, new_object)
def read_objects_from_csv_file(fname, header, new_object):
    """Reads objects from a CSV file, closing the file when done."""
    # Bug fix: the original leaked the file handle. The rows are fully
    # consumed before read_objects_from_csv_stream returns, so closing the
    # stream afterwards is safe.
    stream = open(fname)
    try:
        return read_objects_from_csv_stream(stream, header, new_object)
    finally:
        stream.close()
def read_objects_from_csv(value_rows, header, new_object):
    """Reads objects from the rows of a CSV file.

    Args:
        value_rows: an iterable of CSV rows (lists of cell values).
        header: comma-separated expected header, e.g. UNITS_HEADER.
        new_object: zero-argument factory producing one object per data row.

    Returns:
        A list of populated objects, one per non-empty data row.

    Raises:
        SchemaException: if the header row or any data row does not match
            the expected column count/names.
    """
    values = []
    for row in value_rows:
        # Skip blank lines, which csv.reader yields as empty lists.
        if not row:
            continue
        values.append(row)
    names = header.split(',')

    if names != values[0]:
        raise SchemaException(
            'Error reading CSV header.\n '
            'Header row had %s element(s): %s\n '
            'Expected header row with %s element(s): %s' % (
                len(values[0]), values[0], len(names), names))

    items = []
    for i in range(1, len(values)):
        if len(names) != len(values[i]):
            raise SchemaException(
                'Error reading CSV data row.\n '
                'Row #%s had %s element(s): %s\n '
                'Expected %s element(s): %s' % (
                    i, len(values[i]), values[i], len(names), names))

        # Decode string values in case they were encoded in UTF-8. The CSV
        # reader should do this automatically, but it does not. The issue is
        # discussed here: http://docs.python.org/2/library/csv.html
        decoded_values = []
        for value in values[i]:
            if isinstance(value, basestring):
                value = unicode(value.decode('utf-8'))
            decoded_values.append(value)

        item = new_object()
        set_object_attributes(item, names, decoded_values)
        items.append(item)
    return items
def escape_javascript_regex(text):
    """Rewrites 'key: /re/mods' literals into 'key: regex("/re/mods")'."""
    pattern = r'([:][ ]*)([/])(.*)([/][ismx]*)'
    replacement = r': regex("\2\3\4")'
    return re.sub(pattern, replacement, text)
def remove_javascript_single_line_comment(text):
    """Strips '// ...' comments, both trailing and whole-line."""
    trailing = re.compile('^(.*?)[ ]+//(.*)$', re.MULTILINE)
    whole_line = re.compile('^//(.*)$', re.MULTILINE)
    return whole_line.sub(r'', trailing.sub(r'\1', text))
def remove_javascript_multi_line_comment(text):
    """Strips '/* ... */' comments (greedy: first open to last close)."""
    # pylint: disable-msg=anomalous-backslash-in-string
    comment = re.compile('/\*(.*)\*/', re.MULTILINE + re.DOTALL)
    # pylint: enable-msg=anomalous-backslash-in-string
    return comment.sub(r'', text)
def parse_content_marked_no_verify(content):
    """Parses and returns a tuple of real content and no-verify text."""
    # If you have any free-form JavaScript in the activity file, you need
    # to place it between //<gcb-no-verify> ... //</gcb-no-verify> tags
    # so that the verifier can selectively ignore it.
    pattern = re.compile('%s(.*)%s' % (
        NO_VERIFY_TAG_NAME_OPEN, NO_VERIFY_TAG_NAME_CLOSE), re.DOTALL)
    match = pattern.search(content)
    noverify_text = match.group(1) if match else None
    return (pattern.sub('', content), noverify_text)
def convert_javascript_to_python(content, root_name):
    """Removes JavaScript specific syntactic constructs and returns a tuple."""
    # Order matters: extract the no-verify section first, then strip the
    # comments, then drop the 'var' keyword and convert regex literals.
    (content, noverify_text) = parse_content_marked_no_verify(content)
    for transform in (remove_javascript_multi_line_comment,
                      remove_javascript_single_line_comment):
        content = transform(content)
    content = content.replace('var %s = ' % root_name, '%s = ' % root_name)
    return (escape_javascript_regex(content), noverify_text)
def convert_javascript_file_to_python(fname, root_name):
    """Reads a JavaScript file and converts its content to Python text."""
    # Bug fix: the original leaked the file handle and joined readlines();
    # read the whole file and always close the handle.
    stream = open(fname, 'r')
    try:
        content = stream.read()
    finally:
        stream.close()
    return convert_javascript_to_python(content, root_name)
def evaluate_python_expression_from_text(content, root_name, scope,
                                         noverify_text):
    """Compiles and evaluates a Python script in a restricted environment."""
    # First compiles and then evaluates a Python script text in a restricted
    # environment using provided bindings. Returns the resulting bindings if
    # evaluation completed.

    # create a new execution scope that has only the schema terms defined;
    # remove all other languages constructs including __builtins__
    # NOTE(review): emptying __builtins__ is not a real sandbox -- this only
    # limits accidents, not untrusted input; confirm inputs are trusted.
    restricted_scope = {}
    restricted_scope.update(scope)
    restricted_scope.update({'__builtins__': {}})
    code = compile(content, '<string>', 'exec')
    # pylint: disable-msg=exec-statement
    exec code in restricted_scope
    # pylint: enable-msg=exec-statement
    if noverify_text:
        # Carry the skipped no-verify section along with the bindings.
        restricted_scope['noverify'] = noverify_text
    if not restricted_scope[root_name]:
        raise Exception('Unable to find \'%s\'' % root_name)
    return restricted_scope
def evaluate_javascript_expression_from_file(fname, root_name, scope, error):
    # Converts the JavaScript file to Python text, then evaluates it. On any
    # failure the numbered source is reported through error() before the
    # exception is re-raised.
    (content, noverify_text) = convert_javascript_file_to_python(fname,
                                                                 root_name)
    try:
        return evaluate_python_expression_from_text(content, root_name, scope,
                                                    noverify_text)
    except:
        # Deliberately bare: log every failure (whatever its type) with the
        # line-numbered source, then re-raise unchanged.
        error('Unable to parse %s in file %s\n %s' % (
            root_name, fname, text_to_line_numbered_text(content)))
        for message in sys.exc_info():
            error(str(message))
        raise
class Verifier(object):
"""Verifies Units, Lessons, Assessments, Activities and their relations."""
def __init__(self):
self.echo_func = silent_echo
self.schema_helper = SchemaHelper()
self.errors = 0
self.warnings = 0
self.export = []
def verify_unit_fields(self, units):
self.export.append('units = Array();')
for unit in units:
if not is_one_of(unit.now_available, [True, False]):
self.error(
'Bad now_available \'%s\' for unit id %s; expected '
'\'True\' or \'False\'' % (unit.now_available, unit.id))
if not is_one_of(unit.type, ['U', 'A', 'O']):
self.error(
'Bad type \'%s\' for unit id %s; '
'expected \'U\', \'A\', or \'O\'' % (unit.type, unit.id))
if unit.type == 'A':
if not is_one_of(unit.unit_id, ('Pre', 'Mid', 'Fin')):
self.error(
'Bad unit_id \'%s\'; expected \'Pre\', \'Mid\' or '
'\'Fin\' for unit id %s' % (unit.unit_id, unit.id))
if unit.type == 'U':
if not is_integer(unit.unit_id):
self.error(
'Expected integer unit_id, found %s in unit id '
' %s' % (unit.unit_id, unit.id))
self.export.append('')
self.export.append('units[%s] = Array();' % unit.id)
self.export.append('units[%s][\'lessons\'] = Array();' % unit.id)
unit.list_properties('units[%s]' % unit.id, self.export)
def verify_lesson_fields(self, lessons):
for lesson in lessons:
if not is_one_of(lesson.lesson_activity, ['yes', '']):
self.error('Bad lesson_activity \'%s\' for lesson_id %s' % (
lesson.lesson_activity, lesson.lesson_id))
self.export.append('')
self.export.append('units[%s][\'lessons\'][%s] = Array();' % (
lesson.unit_id, lesson.lesson_id))
lesson.list_properties('units[%s][\'lessons\'][%s]' % (
lesson.unit_id, lesson.lesson_id), self.export)
def verify_unit_lesson_relationships(self, units, lessons):
"""Checks each lesson points to a unit and all lessons are in use."""
used_lessons = []
units.sort(key=lambda x: x.id)
# for unit in units:
for i in range(0, len(units)):
unit = units[i]
# check that unit ids are 1-based and sequential
if unit.id != i + 1:
self.error('Unit out of order: %s' % (unit.id))
# get the list of lessons for each unit
self.fine('Unit %s: %s' % (unit.id, unit.title))
unit_lessons = []
for lesson in lessons:
if lesson.unit_id == unit.unit_id:
if not lesson.unit_title == unit.title:
raise Exception(''.join([
'A unit_title of a lesson (id=%s) must match ',
'title of a unit (id=%s) the lesson belongs to.'
]) % (lesson.lesson_id, lesson.unit_id))
unit_lessons.append(lesson)
used_lessons.append(lesson)
# inspect all lessons for the current unit
unit_lessons.sort(key=lambda x: x.lesson_id)
for j in range(0, len(unit_lessons)):
lesson = unit_lessons[j]
# check that lesson_ids are 1-based and sequential
if lesson.lesson_id != j + 1:
self.warn(
'Lesson lesson_id is out of order: expected %s, found '
' %s (%s)' % (
j + 1, lesson.lesson_id, lesson.to_id_string()))
self.fine(' Lesson %s: %s' % (
lesson.lesson_id, lesson.lesson_title))
# find lessons not used by any of the units
unused_lessons = list(lessons)
for lesson in used_lessons:
unused_lessons.remove(lesson)
for lesson in unused_lessons:
self.warn('Unused lesson_id %s (%s)' % (
lesson.lesson_id, lesson.to_id_string()))
# check all lessons point to known units
for lesson in lessons:
has = False
for unit in units:
if lesson.unit_id == unit.unit_id:
has = True
break
if not has:
self.error('Lesson has unknown unit_id %s (%s)' % (
lesson.unit_id, lesson.to_id_string()))
def verify_activities(self, lessons):
"""Loads and verifies all activities."""
self.info('Loading activities:')
count = 0
for lesson in lessons:
if lesson.lesson_activity == 'yes':
count += 1
fname = os.path.join(
os.path.dirname(__file__),
'../assets/js/activity-' + str(lesson.unit_id) + '.' +
str(lesson.lesson_id) + '.js')
if not os.path.exists(fname):
self.error(' Missing activity: %s' % fname)
else:
activity = evaluate_javascript_expression_from_file(
fname, 'activity', Activity().scope, self.error)
self.verify_activity_instance(activity, fname)
self.export.append('')
self.encode_activity_json(
activity, lesson.unit_id, lesson.lesson_id)
self.info('Read %s activities' % count)
    def verify_assessment(self, units):
        """Loads and verifies all assessments.

        For each unit of type 'A', loads the matching
        ../assets/js/assessment-<unit_id>.js file, validates it against the
        assessment schema and appends its JSON encoding to self.export.
        """
        self.export.append('')
        self.export.append('assessments = Array();')
        self.info('Loading assessment:')
        count = 0
        for unit in units:
            if unit.type == 'A':
                count += 1
                assessment_name = str(unit.unit_id)
                fname = os.path.join(
                    os.path.dirname(__file__),
                    '../assets/js/assessment-' + assessment_name + '.js')
                if not os.path.exists(fname):
                    self.error(' Missing assessment: %s' % fname)
                else:
                    # Evaluate the JS 'assessment = ...' expression in the
                    # Assessment scope; evaluation errors go to self.error.
                    assessment = evaluate_javascript_expression_from_file(
                        fname, 'assessment', Assessment().scope, self.error)
                    self.verify_assessment_instance(assessment, fname)
                    self.export.append('')
                    self.encode_assessment_json(assessment, assessment_name)
        self.info('Read %s assessments' % count)
# NB: The exported script needs to define a gcb_regex() wrapper function
@staticmethod
def encode_regex(regex_str):
"""Encodes a JavaScript-style regex into a Python gcb_regex call."""
# parse the regex into the base and modifiers. e.g., for /foo/i
# base is 'foo' and modifiers is 'i'
assert regex_str[0] == '/'
# find the LAST '/' in regex_str (because there might be other
# escaped '/' characters in the middle of regex_str)
final_slash_index = regex_str.rfind('/')
assert final_slash_index > 0
base = regex_str[1:final_slash_index]
modifiers = regex_str[final_slash_index+1:]
func_str = 'gcb_regex(' + repr(base) + ', ' + repr(modifiers) + ')'
return func_str
    def encode_activity_json(self, activity_dict, unit_id, lesson_id):
        """Encodes an activity dictionary into JSON.

        Walks activity_dict['activity'], converting each element (plain
        string or question dict) into a JSON-safe form, then appends
        "units[u]['lessons'][l]['activity'] = <json>;" to self.export.
        Elements of unexpected type fail the assertions below.
        """
        output = []
        for elt in activity_dict['activity']:
            t = type(elt)
            encoded_elt = None
            if t is str:
                encoded_elt = {'type': 'string', 'value': elt}
            elif t is dict:
                qt = elt['questionType']
                encoded_elt = {'type': qt}
                if qt == 'multiple choice':
                    # Each choice is an (x, y, z) triple whose middle item is
                    # a Term-like wrapper; only its .value is exported.
                    choices = elt['choices']
                    encoded_choices = [[x, y.value, z] for x, y, z in choices]
                    encoded_elt['choices'] = encoded_choices
                elif qt == 'multiple choice group':
                    # everything inside are primitive types that can be encoded
                    elt_copy = dict(elt)
                    del elt_copy['questionType'] # redundant
                    encoded_elt['value'] = elt_copy
                elif qt == 'freetext':
                    for k in elt.keys():
                        if k == 'questionType':
                            continue
                        elif k == 'correctAnswerRegex':
                            # Regex terms become gcb_regex(...) calls.
                            encoded_elt[k] = Verifier.encode_regex(elt[k].value)
                        else:
                            # ordinary string
                            encoded_elt[k] = elt[k]
                else:
                    assert False
            else:
                assert False
            assert encoded_elt
            output.append(encoded_elt)
        # N.B.: make sure to get the string quoting right!
        code_str = "units[%s]['lessons'][%s]['activity'] = " % (
            unit_id, lesson_id) + repr(json.dumps(output)) + ';'
        self.export.append(code_str)
        if 'noverify' in activity_dict:
            # Raw, unverified code blocks are exported verbatim.
            self.export.append('')
            noverify_code_str = "units[%s]['lessons'][%s]['code'] = " % (
                unit_id, lesson_id) + repr(activity_dict['noverify']) + ';'
            self.export.append(noverify_code_str)
    def encode_assessment_json(self, assessment_dict, assessment_name):
        """Encodes an assessment dictionary into JSON.

        Builds a JSON-safe copy of assessment_dict['assessment'] and appends
        "assessments['<name>'] = <json>;" to self.export; a 'noverify' raw
        code block, when present, is exported verbatim as well.
        """
        real_dict = assessment_dict['assessment']
        output = {}
        output['assessmentName'] = real_dict['assessmentName']
        if 'preamble' in real_dict:
            output['preamble'] = real_dict['preamble']
        output['checkAnswers'] = real_dict['checkAnswers'].value
        encoded_questions_list = []
        for elt in real_dict['questionsList']:
            encoded_elt = {}
            encoded_elt['questionHTML'] = elt['questionHTML']
            if 'lesson' in elt:
                encoded_elt['lesson'] = elt['lesson']
            if 'correctAnswerNumeric' in elt:
                encoded_elt['correctAnswerNumeric'] = elt[
                    'correctAnswerNumeric']
            if 'correctAnswerString' in elt:
                encoded_elt['correctAnswerString'] = elt['correctAnswerString']
            if 'correctAnswerRegex' in elt:
                # Regex terms become gcb_regex(...) calls.
                encoded_elt['correctAnswerRegex'] = Verifier.encode_regex(
                    elt['correctAnswerRegex'].value)
            if 'choices' in elt:
                encoded_choices = []
                correct_answer_index = None
                for (ind, e) in enumerate(elt['choices']):
                    if type(e) is str:
                        encoded_choices.append(e)
                    elif e.term_type == CORRECT:
                        # The correct() wrapper marks the right choice; its
                        # index is exported alongside the plain choice text.
                        encoded_choices.append(e.value)
                        correct_answer_index = ind
                    else:
                        raise Exception("Invalid type in 'choices'")
                encoded_elt['choices'] = encoded_choices
                encoded_elt['correctAnswerIndex'] = correct_answer_index
            encoded_questions_list.append(encoded_elt)
        output['questionsList'] = encoded_questions_list
        # N.B.: make sure to get the string quoting right!
        code_str = 'assessments[\'' + assessment_name + '\'] = ' + repr(
            json.dumps(output)) + ';'
        self.export.append(code_str)
        if 'noverify' in assessment_dict:
            self.export.append('')
            noverify_code_str = ('assessments[\'' + assessment_name +
                                 '\'] = ' + repr(assessment_dict['noverify']) +
                                 ';')
            self.export.append(noverify_code_str)
    def format_parse_log(self):
        """Returns the schema helper's parse log as one printable string."""
        return 'Parse log:\n%s' % '\n'.join(self.schema_helper.parse_log)
    def verify_assessment_instance(self, scope, fname):
        """Verifies compliance of assessment with schema.

        Args:
            scope: dict produced by evaluating the assessment file; expected
                to hold an 'assessment' key.  Falsy when evaluation failed.
            fname: file name, used only for log/error messages.
        """
        if scope:
            try:
                self.schema_helper.check_instances_match_schema(
                    scope['assessment'], SCHEMA['assessment'], 'assessment')
                self.info(' Verified assessment %s' % fname)
                if OUTPUT_DEBUG_LOG:
                    self.info(self.format_parse_log())
            except SchemaException as e:
                # Surface the parse log so schema mismatches are debuggable.
                self.error(' Error in assessment %s\n%s' % (
                    fname, self.format_parse_log()))
                raise e
        else:
            self.error(' Unable to evaluate \'assessment =\' in %s' % fname)
    def verify_activity_instance(self, scope, fname):
        """Verifies compliance of activity with schema.

        Args:
            scope: dict produced by evaluating the activity file; expected
                to hold an 'activity' key.  Falsy when evaluation failed.
            fname: file name, used only for log/error messages.
        """
        if scope:
            try:
                self.schema_helper.check_instances_match_schema(
                    scope['activity'], SCHEMA['activity'], 'activity')
                self.info(' Verified activity %s' % fname)
                if OUTPUT_DEBUG_LOG:
                    self.info(self.format_parse_log())
            except SchemaException as e:
                # Surface the parse log so schema mismatches are debuggable.
                self.error(' Error in activity %s\n%s' % (
                    fname, self.format_parse_log()))
                raise e
        else:
            self.error(' Unable to evaluate \'activity =\' in %s' % fname)
    def fine(self, x):
        """Logs a fine-grained line; emitted only when OUTPUT_FINE_LOG."""
        if OUTPUT_FINE_LOG:
            self.echo_func('FINE: ' + x)
    def info(self, x):
        """Logs an informational line; always emitted."""
        self.echo_func('INFO: ' + x)
    def warn(self, x):
        """Logs a warning and bumps the counter reported in the summary."""
        self.warnings += 1
        self.echo_func('WARNING: ' + x)
    def error(self, x):
        """Logs an error and bumps the counter reported in the summary."""
        self.errors += 1
        self.echo_func('ERROR: ' + x)
    def load_and_verify_model(self, echo_func):
        """Loads, parses and verifies all content for a course.

        Args:
            echo_func: callable taking one string; receives every log line.

        Returns:
            A (warnings, errors) tuple of counts accumulated during the run.
        """
        self.echo_func = echo_func
        self.info('Started verification in: %s' % __file__)
        unit_file = os.path.join(os.path.dirname(__file__), '../data/unit.csv')
        lesson_file = os.path.join(
            os.path.dirname(__file__), '../data/lesson.csv')
        self.info('Loading units from: %s' % unit_file)
        units = read_objects_from_csv_file(unit_file, UNITS_HEADER, Unit)
        self.info('Read %s units' % len(units))
        self.info('Loading lessons from: %s' % lesson_file)
        lessons = read_objects_from_csv_file(
            lesson_file, LESSONS_HEADER, Lesson)
        self.info('Read %s lessons' % len(lessons))
        self.verify_unit_fields(units)
        self.verify_lesson_fields(lessons)
        self.verify_unit_lesson_relationships(units, lessons)
        try:
            # Schema violations are counted as errors rather than aborting,
            # so the summary below still runs.
            self.verify_activities(lessons)
            self.verify_assessment(units)
        except SchemaException as e:
            self.error(str(e))
        self.info('Schema usage statistics: %s' % self.schema_helper.type_stats)
        self.info('Completed verification: %s warnings, %s errors.' % (
            self.warnings, self.errors))
        return self.warnings, self.errors
def run_all_regex_unit_tests():
    """Executes all tests related to regular expressions."""
    # pylint: disable-msg=anomalous-backslash-in-string
    # escape_javascript_regex() must wrap JS regex literals in regex("...").
    for source, expected in [
        ('blah regex: /site:bls.gov?/i, blah',
         'blah regex: regex("/site:bls.gov?/i"), blah'),
        ('blah regex: /site:http:\/\/www.google.com?q=abc/i, blah',
         'blah regex: regex("/site:http:\/\/www.google.com?q=abc/i"), blah'),
    ]:
        assert escape_javascript_regex(source) == expected
    # Multi-line /* ... */ comments are removed, preserving line structure.
    for source, expected in [
        ('blah\n/*\ncomment\n*/\nblah', 'blah\n\nblah'),
        ('blah\nblah /*\ncomment\nblah */\nblah', 'blah\nblah \nblah'),
    ]:
        assert remove_javascript_multi_line_comment(source) == expected
    # Single-line '//' comments are removed, but URLs must survive.
    for source, expected in [
        ('blah\n// comment\nblah', 'blah\n\nblah'),
        ('blah\nblah http://www.foo.com\nblah',
         'blah\nblah http://www.foo.com\nblah'),
        ('blah\nblah // comment\nblah', 'blah\nblah\nblah'),
        ('blah\nblah // comment http://www.foo.com\nblah', 'blah\nblah\nblah'),
    ]:
        assert remove_javascript_single_line_comment(source) == expected
    # Content inside <gcb-no-verify> markers is stripped from element [0].
    assert parse_content_marked_no_verify(
        'blah1\n// <gcb-no-verify>\n/blah2\n// </gcb-no-verify>\nblah3')[0] == (
        'blah1\n// \nblah3')
    # pylint: enable-msg=anomalous-backslash-in-string
    # JS regex literals must translate into gcb_regex(base, modifiers) calls.
    for source, expected in [
        ('/white?/i', "gcb_regex('white?', 'i')"),
        ('/jane austen (book|books) \\-price/i',
         r"gcb_regex('jane austen (book|books) \\-price', 'i')"),
        ('/Kozanji|Kozan-ji|Kosanji|Kosan-ji/i',
         r"gcb_regex('Kozanji|Kozan-ji|Kosanji|Kosan-ji', 'i')"),
        ('/Big Time College Sport?/i',
         "gcb_regex('Big Time College Sport?', 'i')"),
        ('/354\\s*[+]\\s*651/', r"gcb_regex('354\\s*[+]\\s*651', '')"),
    ]:
        assert Verifier.encode_regex(source) == expected
def run_all_schema_helper_unit_tests():
"""Executes all tests related to schema validation."""
def assert_same(a, b):
if a != b:
raise Exception('Expected:\n %s\nFound:\n %s' % (a, b))
def assert_pass(instances, types, expected_result=None):
try:
schema_helper = SchemaHelper()
result = schema_helper.check_instances_match_schema(
instances, types, 'test')
if OUTPUT_DEBUG_LOG:
print '\n'.join(schema_helper.parse_log)
if expected_result:
assert_same(expected_result, result)
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
print '\n'.join(schema_helper.parse_log)
raise
def assert_fails(func):
try:
func()
raise Exception('Expected to fail')
except SchemaException as e:
if OUTPUT_DEBUG_LOG:
print str(e)
def assert_fail(instances, types):
assert_fails(lambda: assert_pass(instances, types))
def create_python_dict_from_js_object(js_object):
python_str, noverify = convert_javascript_to_python(
'var x = ' + js_object, 'x')
ret = evaluate_python_expression_from_text(
python_str, 'x', Assessment().scope, noverify)
return ret['x']
# CSV tests
read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type', Unit)
def reader_one():
return read_objects_from_csv(
[['id', 'type'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_one)
def reader_two():
read_objects_from_csv(
[['id', 'type', 'title'], [1, 'none']], 'id,type,title', Unit)
assert_fails(reader_two)
# context tests
assert_same(Context().new([]).new(['a']).new(['b', 'c']).format_path(),
('//a/b/c'))
# simple map tests
assert_pass({'name': 'Bob'}, {'name': STRING}, None)
assert_fail('foo', 'bar')
assert_fail({'name': 'Bob'}, {'name': INTEGER})
assert_fail({'name': 12345}, {'name': STRING})
assert_fail({'amount': 12345}, {'name': INTEGER})
assert_fail({'regex': Term(CORRECT)}, {'regex': Term(REGEX)})
assert_pass({'name': 'Bob'}, {'name': STRING, 'phone': STRING})
assert_pass({'name': 'Bob'}, {'phone': STRING, 'name': STRING})
assert_pass({'name': 'Bob'},
{'phone': STRING, 'name': STRING, 'age': INTEGER})
# mixed attributes tests
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_pass({'colors': []}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [STRING]})
assert_fail({'colors': {'red': 'blue'}}, {'colors': [FLOAT]})
assert_fail({'colors': ['red', 'blue', 5.5]}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue', {'foo': 'bar'}]},
{'colors': [STRING]})
assert_fail({'colors': ['red', 'blue'], 'foo': 'bar'},
{'colors': [STRING]})
assert_pass({'colors': ['red', 1]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': [1, 2, 3]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 1, 5.3]}, {'colors': [[STRING, INTEGER]]})
assert_pass({'colors': ['red', 'blue']}, {'colors': [STRING]})
assert_fail({'colors': ['red', 'blue']}, {'colors': [[STRING]]})
assert_fail({'colors': ['red', ['blue']]}, {'colors': [STRING]})
assert_fail({'colors': ['red', ['blue', 'green']]}, {'colors': [STRING]})
# required attribute tests
assert_pass({'colors': ['red', 5]}, {'colors': [[STRING, INTEGER]]})
assert_fail({'colors': ['red', 5]}, {'colors': [[INTEGER, STRING]]})
assert_pass({'colors': ['red', 5]}, {'colors': [STRING, INTEGER]})
assert_pass({'colors': ['red', 5]}, {'colors': [INTEGER, STRING]})
assert_fail({'colors': ['red', 5, 'FF0000']},
{'colors': [[STRING, INTEGER]]})
# an array and a map of primitive type tests
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': ['FF0000']}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': INTEGER}})
assert_fail({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': {'hex': STRING}}})
assert_pass({'color': {'name': 'red', 'rgb': 'FF0000'}},
{'color': {'name': STRING, 'rgb': STRING}})
assert_pass({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'name': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
assert_fail({'colors':
[{'name': 'red', 'rgb': 'FF0000'},
{'phone': 'blue', 'rgb': '0000FF'}]},
{'colors': [{'name': STRING, 'rgb': STRING}]})
# boolean type tests
assert_pass({'name': 'Bob', 'active': True},
{'name': STRING, 'active': BOOLEAN})
assert_pass({'name': 'Bob', 'active': [5, True, False]},
{'name': STRING, 'active': [INTEGER, BOOLEAN]})
assert_pass({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [STRING, INTEGER, BOOLEAN]})
assert_fail({'name': 'Bob', 'active': [5, True, 'False']},
{'name': STRING, 'active': [[INTEGER, BOOLEAN]]})
# optional attribute tests
assert_pass({'points':
[{'x': 1, 'y': 2, 'z': 3}, {'x': 3, 'y': 2, 'z': 1},
{'x': 2, 'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'points':
[{'x': 1, 'z': 3}, {'x': 3, 'y': 2}, {'y': 3, 'z': 1}]},
{'points': [{'x': INTEGER, 'y': INTEGER, 'z': INTEGER}]})
assert_pass({'account':
[{'name': 'Bob', 'age': 25, 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
assert_pass({'account':
[{'name': 'Bob', 'active': True}]},
{'account':
[{'age': INTEGER, 'name': STRING, 'active': BOOLEAN}]})
# nested array tests
assert_fail({'name': 'Bob', 'active': [5, True, 'false']},
{'name': STRING, 'active': [[BOOLEAN]]})
assert_fail({'name': 'Bob', 'active': [True]},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'Bob', 'active': ['true']},
{'name': STRING, 'active': [[STRING]]})
assert_pass({'name': 'flowers', 'price': ['USD', 9.99]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
assert_pass({'name': 'flowers', 'price':
[['USD', 9.99], ['CAD', 11.79], ['RUB', 250.23]]},
{'name': STRING, 'price': [[STRING, FLOAT]]})
# selector tests
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'NY', 'drink': 'wine'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_pass({'likes': [{'state': 'CA', 'food': 'cheese'},
{'state': 'CA', 'food': 'nuts'}]},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
assert_fail({'likes': {'state': 'CA', 'drink': 'cheese'}},
{'likes': [{'state': 'CA', 'food': STRING},
{'state': 'NY', 'drink': STRING}]})
# creating from dict tests
assert_same(create_python_dict_from_js_object('{"active": true}'),
{'active': Term(BOOLEAN, True)})
assert_same(create_python_dict_from_js_object(
'{"a": correct("hello world")}'),
{'a': Term(CORRECT, 'hello world')})
assert_same(create_python_dict_from_js_object('{"a": /hello/i}'),
{'a': Term(REGEX, '/hello/i')})
def run_all_unit_tests():
    """Runs the regex and schema-validation self-tests; raises on failure."""
    run_all_regex_unit_tests()
    run_all_schema_helper_unit_tests()
# Self-tests run on every import of this module; they raise on failure.
run_all_unit_tests()
if __name__ == '__main__':
    # As a script, additionally verify the full course content from CSV/JS.
    Verifier().load_and_verify_model(echo)
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allows export of Lessons and Units to other systems."""
__author__ = 'psimakov@google.com (Pavel Simakov)'
from datetime import datetime
import os
import verify
# Version stamp written into the header of every generated export file.
RELEASE_TAG = '1.0'
def echo(unused_x):
    """No-op logger handed to the verifier to silence its output."""
    pass
JS_GCB_REGEX = """
function gcb_regex(base, modifiers) {
// NB: base should already have backslashes escaped
return new RegExp(base, modifiers);
}
"""
def export_to_javascript(filename, lines, date):
    """Creates JavaScript export function from given lines and writes a file.

    Args:
        filename: base name of the output file; a '.js' suffix is appended.
        lines: verifier-produced statements for the body of gcb_import();
            falsy entries become blank lines.
        date: timestamp embedded in the generated file header.
    """
    code = []
    code.append(JS_GCB_REGEX)
    code.append('function gcb_import(){')
    for line in lines:
        if line:
            code.append(' %s' % line)
        else:
            code.append('')
    code.append('')
    code.append(' course = Array();')
    code.append(' course["units"] = units;')
    code.append(' course["assessments"] = assessments;')
    code.append(' return course;')
    code.append('}')
    # 'with' guarantees the handle is closed even when a write raises.
    with open('%s.js' % filename, 'w') as afile:
        afile.write('// Course Builder %s JavaScript Export on %s\n' % (
            RELEASE_TAG, date))
        afile.write('// begin\n')
        afile.write('\n'.join(code))
        afile.write('\n// end')
PYTHON_GCB_REGEX = """
import re
def gcb_regex(base, modifiers):
flags = 0
if 'i' in modifiers:
flags |= re.IGNORECASE
if 'm' in modifiers:
flags |= re.MULTILINE
return re.compile(base, flags)
"""
def export_to_python(filename, lines, date):
    """Creates Python export function from given lines and writes a file.

    Args:
        filename: base name of the output file; a '.py' suffix is appended.
        lines: verifier-produced statements for the body of gcb_import().
        date: timestamp embedded in the generated file header.
    """
    code = []
    # Minimal shims so the JS-flavored export lines are valid Python.
    code.append('class Array(dict):')
    code.append(' pass')
    code.append('')
    code.append('true = True')
    code.append('false = False')
    code.append(PYTHON_GCB_REGEX)
    code.append('def gcb_import():')
    for line in lines:
        code.append(' %s' % line)
    code.append('')
    code.append(' course = Array();')
    code.append(' course["units"] = units;')
    code.append(' course["assessments"] = assessments;')
    code.append(' return course;')
    # 'with' guarantees the handle is closed even when a write raises.
    with open('%s.py' % filename, 'w') as afile:
        afile.write('# Course Builder %s Python Export on %s\n' % (
            RELEASE_TAG, date))
        afile.write('# begin\n')
        afile.write('\n'.join(code))
        afile.write('\n# end')
# TODO(psimakov): implement PHP_GCB_REGEX, but it's unclear how to return a new
# regexp object in PHP. maybe see http://www.regular-expressions.info/php.html
def export_to_php(filename, lines, date):
    """Creates PHP export function from given lines and writes a file.

    Args:
        filename: base name of the output file; a '.php' suffix is appended.
        lines: verifier-produced statements; each non-empty line is exported
            prefixed with '$' to form a PHP variable assignment.
        date: timestamp embedded in the generated file header.
    """
    code = []
    code.append('function gcb_import(){')
    for line in lines:
        if line:
            code.append(' $%s' % line)
        else:
            code.append('')
    code.append('')
    code.append(' $course = Array();')
    code.append(' $course["units"] = $units;')
    code.append(' $course["assessments"] = $assessments;')
    code.append(' return $course;')
    code.append('}')
    # 'with' guarantees the handle is closed even when a write raises.
    with open('%s.php' % filename, 'w') as afile:
        afile.write('<?php\n')
        afile.write('// Course Builder %s PHP Export on %s\n' %
                    (RELEASE_TAG, date))
        afile.write('// begin\n')
        afile.write('\n'.join(code))
        afile.write('\n// end')
        afile.write('?>')
def export_to_file(filename, lines):
    """Writes JavaScript, Python and PHP exports sharing one timestamp."""
    date = datetime.utcnow()
    export_to_javascript(filename, lines, date)
    export_to_python(filename, lines, date)
    export_to_php(filename, lines, date)
if __name__ == '__main__':
print 'Export started using %s' % os.path.realpath(__file__)
verifier = verify.Verifier()
errors = verifier.load_and_verify_model(echo)
if errors:
raise Exception('Please fix all errors reported by tools/verify.py '
'before continuing!')
fname = os.path.join(os.getcwd(), 'coursebuilder_course')
export_to_file(fname, verifier.export)
print 'Export complete to %s' % fname
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes and methods to manage all aspects of student assessments."""
__author__ = 'pgbovine@google.com (Philip Guo)'
import datetime
import json
from models import models
from models import utils
from models.models import Student
from models.models import StudentAnswersEntity
from utils import BaseHandler
from google.appengine.ext import db
def store_score(student, assessment_type, score):
    """Stores a student's score on a particular assessment.

    Args:
        student: the student whose data is stored.
        assessment_type: the type of the assessment.
        score: the student's score on this assessment.

    Returns:
        the (possibly modified) assessment_type, which the caller can
        use to render an appropriate response page.
    """
    # FIXME: Course creators can edit this code to implement custom
    # assessment scoring and storage behavior
    # TODO(pgbovine): Note that the latest version of answers are always saved,
    # but scores are only saved if they're higher than the previous attempt.
    # This can lead to unexpected analytics behavior. Resolve this.
    existing_score = utils.get_score(student, assessment_type)
    # remember to cast to int for comparison
    if (existing_score is None) or (score > int(existing_score)):
        utils.set_score(student, assessment_type, score)
    # special handling for computing final score:
    if assessment_type == 'postcourse':
        # A missing midcourse score counts as zero in the overall formula.
        midcourse_score = utils.get_score(student, 'midcourse')
        if midcourse_score is None:
            midcourse_score = 0
        else:
            midcourse_score = int(midcourse_score)
        # Use the best postcourse score seen so far (stored or current).
        if existing_score is None:
            postcourse_score = score
        else:
            postcourse_score = int(existing_score)
            if score > postcourse_score:
                postcourse_score = score
        # Calculate overall score based on a formula
        overall_score = int((0.3 * midcourse_score) + (0.7 * postcourse_score))
        # TODO(pgbovine): this changing of assessment_type is ugly ...
        if overall_score >= 70:
            assessment_type = 'postcourse_pass'
        else:
            assessment_type = 'postcourse_fail'
        utils.set_score(student, 'overall_score', overall_score)
    return assessment_type
class AnswerHandler(BaseHandler):
    """Handler for saving assessment answers."""

    # Find student entity and save answers
    @db.transactional(xg=True)
    def update_assessment_transaction(
        self, email, assessment_type, new_answers, score):
        """Stores answer and updates user scores.

        Runs as a cross-group datastore transaction so the Student and
        StudentAnswersEntity writes commit (or roll back) together.

        Args:
            email: key of the Student entity to update.
            assessment_type: the type of the assessment being submitted.
            new_answers: already-decoded answers payload.
            score: integer score for this submission.

        Returns:
            A (student, assessment_type) tuple; assessment_type may have been
            rewritten by store_score() (e.g. to a pass/fail variant).
        """
        student = Student.get_by_email(email)
        # It may be that old Student entities don't have user_id set; fix it.
        if not student.user_id:
            student.user_id = self.get_user().user_id()
        answers = StudentAnswersEntity.get_by_key_name(student.user_id)
        if not answers:
            answers = StudentAnswersEntity(key_name=student.user_id)
        answers.updated_on = datetime.datetime.now()
        utils.set_answer(answers, assessment_type, new_answers)
        assessment_type = store_score(student, assessment_type, score)
        student.put()
        answers.put()
        # Also record the event, which is useful for tracking multiple
        # submissions and history.
        models.EventEntity.record(
            'submit-assessment', self.get_user(), json.dumps({
                'type': 'assessment-%s' % assessment_type,
                'values': new_answers, 'location': 'AnswerHandler'}))
        return (student, assessment_type)

    def post(self):
        """Handles POST requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'assessment-post'):
            return
        assessment_type = self.request.get('assessment_type')
        # Convert answers from JSON to dict.
        answers = self.request.get('answers')
        if answers:
            answers = json.loads(answers)
        else:
            answers = []
        # TODO(pgbovine): consider storing as float for better precision
        score = int(round(float(self.request.get('score'))))
        # Record score.
        (student, assessment_type) = self.update_assessment_transaction(
            student.key().name(), assessment_type, answers, score)
        self.template_value['navbar'] = {'course': True}
        self.template_value['assessment'] = assessment_type
        self.template_value['student_score'] = utils.get_score(
            student, 'overall_score')
        self.render('test_confirmation.html')
| Python |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers for generating various frontend pages."""
__author__ = 'Saifu Angto (saifu@google.com)'
import json
from models import models
from models.config import ConfigProperty
from models.counters import PerfCounter
from utils import BaseHandler
from utils import BaseRESTHandler
from utils import XsrfTokenManager
# Whether to record events in a database.
CAN_PERSIST_ACTIVITY_EVENTS = ConfigProperty(
    'gcb_can_persist_activity_events', bool, (
        'Whether or not to record student activity interactions in a '
        'datastore. Without event recording, you cannot analyze student '
        'activity interactions. On the other hand, no event recording reduces '
        'the number of datastore operations and minimizes the use of Google '
        'App Engine quota. Turn event recording on if you want to analyze '
        'this data.'),
    False)
# Incremented for every event POSTed to /rest/events, recorded or not.
COURSE_EVENTS_RECEIVED = PerfCounter(
    'gcb-course-events-received',
    'A number of activity/assessment events received by the server.')
# Incremented only when an event is actually persisted to the datastore.
COURSE_EVENTS_RECORDED = PerfCounter(
    'gcb-course-events-recorded',
    'A number of activity/assessment events recorded in a datastore.')
def extract_unit_and_lesson_id(handler):
    """Extracts unit and lesson id from the request.

    Args:
        handler: request handler whose 'unit' and 'lesson' query parameters
            are read; either may be absent or empty.

    Returns:
        A (unit_id, lesson_id) tuple of ints, each defaulting to 1.
    """
    unit_param = handler.request.get('unit')
    unit_id = int(unit_param) if unit_param else 1
    lesson_param = handler.request.get('lesson')
    lesson_id = int(lesson_param) if lesson_param else 1
    return unit_id, lesson_id
class CourseHandler(BaseHandler):
    """Handler for generating course page."""

    @classmethod
    def get_child_routes(cls):
        """Add child handlers for REST."""
        return [('/rest/events', EventsRESTHandler)]

    def get(self):
        """Handles GET requests."""
        user = self.personalize_page_and_get_user()
        if not user:
            # Anonymous visitors see the course preview instead.
            self.redirect('/preview')
            return None
        if not self.personalize_page_and_get_enrolled():
            return
        self.template_value['units'] = self.get_units()
        self.template_value['navbar'] = {'course': True}
        self.render('course.html')
class UnitHandler(BaseHandler):
    """Handler for generating unit page."""

    def get(self):
        """Handles GET requests."""
        if not self.personalize_page_and_get_enrolled():
            return
        # Extract incoming args
        unit_id, lesson_id = extract_unit_and_lesson_id(self)
        self.template_value['unit_id'] = unit_id
        self.template_value['lesson_id'] = lesson_id
        # Set template values for a unit and its lesson entities
        for unit in self.get_units():
            if unit.unit_id == str(unit_id):
                self.template_value['units'] = unit
        lessons = self.get_lessons(unit_id)
        self.template_value['lessons'] = lessons
        # Set template values for nav bar
        self.template_value['navbar'] = {'course': True}
        # Set template values for back and next nav buttons.
        # NOTE(review): indexing below assumes lesson_id is 1-based and
        # 'lessons' is ordered by lesson id — confirm against get_lessons().
        if lesson_id == 1:
            self.template_value['back_button_url'] = ''
        elif lessons[lesson_id - 2].activity:
            # Previous lesson has an activity; link back to its activity page.
            self.template_value['back_button_url'] = (
                'activity?unit=%s&lesson=%s' % (unit_id, lesson_id - 1))
        else:
            self.template_value['back_button_url'] = (
                'unit?unit=%s&lesson=%s' % (unit_id, lesson_id - 1))
        if lessons[lesson_id - 1].activity:
            # Current lesson has an activity; 'next' goes there first.
            self.template_value['next_button_url'] = (
                'activity?unit=%s&lesson=%s' % (unit_id, lesson_id))
        elif lesson_id == len(lessons):
            self.template_value['next_button_url'] = ''
        else:
            self.template_value['next_button_url'] = (
                'unit?unit=%s&lesson=%s' % (unit_id, lesson_id + 1))
        self.render('unit.html')
class ActivityHandler(BaseHandler):
    """Handler for generating activity page and receiving submissions."""

    def get(self):
        """Handles GET requests."""
        if not self.personalize_page_and_get_enrolled():
            return
        # Extract incoming args
        unit_id, lesson_id = extract_unit_and_lesson_id(self)
        self.template_value['unit_id'] = unit_id
        self.template_value['lesson_id'] = lesson_id
        # Set template values for a unit and its lesson entities
        for unit in self.get_units():
            if unit.unit_id == str(unit_id):
                self.template_value['units'] = unit
        lessons = self.get_lessons(unit_id)
        self.template_value['lessons'] = lessons
        # Set template values for nav bar
        self.template_value['navbar'] = {'course': True}
        # Set template values for back and next nav buttons; 'back' returns
        # to this lesson's unit page, 'next' advances to the next lesson.
        self.template_value['back_button_url'] = (
            'unit?unit=%s&lesson=%s' % (unit_id, lesson_id))
        if lesson_id == len(lessons):
            self.template_value['next_button_url'] = ''
        else:
            self.template_value['next_button_url'] = (
                'unit?unit=%s&lesson=%s' % (unit_id, lesson_id + 1))
        # XSRF token lets the activity page POST events to /rest/events.
        self.template_value['record_events'] = CAN_PERSIST_ACTIVITY_EVENTS.value
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))
        self.render('activity.html')
class AssessmentHandler(BaseHandler):
    """Handler for generating assessment page."""

    def get(self):
        """Handles GET requests."""
        if not self.personalize_page_and_get_enrolled():
            return
        # Extract incoming args; the assessment name defaults to 'Pre'.
        n = self.request.get('name')
        if not n:
            n = 'Pre'
        self.template_value['name'] = n
        self.template_value['navbar'] = {'course': True}
        self.template_value['record_events'] = CAN_PERSIST_ACTIVITY_EVENTS.value
        # Separate XSRF tokens for submitting answers and recording events.
        self.template_value['assessment_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('assessment-post'))
        self.template_value['event_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('event-post'))
        self.render('assessment.html')
class EventsRESTHandler(BaseRESTHandler):
    """Provides REST API for an Event."""

    def post(self):
        """Receives event and puts it into datastore.

        Silently drops the event (no error response) when recording is
        disabled, the XSRF token fails, or the sender is not an enrolled
        student.
        """
        COURSE_EVENTS_RECEIVED.inc()
        if not CAN_PERSIST_ACTIVITY_EVENTS.value:
            return
        request = json.loads(self.request.get('request'))
        if not self.assert_xsrf_token_or_fail(request, 'event-post', {}):
            return
        user = self.get_user()
        if not user:
            return
        student = models.Student.get_enrolled_student_by_email(user.email())
        if not student:
            return
        models.EventEntity.record(
            request.get('source'), user, request.get('payload'))
        COURSE_EVENTS_RECORDED.inc()
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handlers that are not directly related to course content."""
__author__ = 'Saifu Angto (saifu@google.com)'
import base64
import hmac
import time
import urlparse
from models import transforms
from models.config import ConfigProperty
from models.courses import Course
from models.models import MemcacheManager
from models.models import Student
from models.roles import Roles
from models.utils import get_all_scores
import webapp2
from google.appengine.api import users
# The name of the template dict key that stores a course's base location.
COURSE_BASE_KEY = 'gcb_course_base'
# The name of the template dict key that stores data from course.yaml.
COURSE_INFO_KEY = 'course_info'
# Signing secret for XSRF tokens; per the description below, changing it
# invalidates all previously issued tokens.
XSRF_SECRET = ConfigProperty(
    'gcb_xsrf_secret', str, (
        'Text used to encrypt tokens, which help prevent Cross-site request '
        'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
        'preferably using 16-64 characters. Once you change this value, the '
        'server rejects all subsequent requests issued using an old value for '
        'this variable.'),
    'course builder XSRF secret')
class ReflectiveRequestHandler(object):
    """Dispatches GET/POST requests to methods chosen by the 'action' param.

    Use this class as a mix-in with any webapp2.RequestHandler. Declare the
    following class variables:

        default_action = 'list'
        get_actions = ['default_action', 'edit']
        post_actions = ['save']

    and implement get_list(self), get_edit(self), post_save(self). Those
    methods are then invoked automatically based on the 'action' GET/POST
    parameter.
    """

    def create_xsrf_token(self, action):
        """Issues an XSRF token bound to the given action."""
        return XsrfTokenManager.create_xsrf_token(action)

    def get(self):
        """Routes GET to get_<action>; responds 404 for unknown actions."""
        action = self.request.get('action') or self.__class__.default_action
        if action not in self.__class__.get_actions:
            self.error(404)
            return
        method = getattr(self, 'get_%s' % action)
        if not method:
            self.error(404)
            return
        return method()

    def post(self):
        """Routes POST to post_<action> after validating the XSRF token."""
        action = self.request.get('action')
        if not (action and action in self.__class__.post_actions):
            self.error(404)
            return
        method = getattr(self, 'post_%s' % action)
        if not method:
            self.error(404)
            return
        # Every POST must carry a valid XSRF token for its action.
        if not XsrfTokenManager.is_xsrf_token_valid(
                self.request.get('xsrf_token'), action):
            self.error(403)
            return
        return method()
class ApplicationHandler(webapp2.RequestHandler):
    """A request handler that is aware of the current application context."""

    @classmethod
    def is_absolute(cls, url):
        """Whether 'url' carries a scheme (http:, https:, ...)."""
        return bool(urlparse.urlparse(url).scheme)

    @classmethod
    def get_base_href(cls, handler):
        """Computes current course <base> href."""
        slug = handler.app_context.get_slug()
        if not slug.endswith('/'):
            slug = '%s/' % slug
        # For IE to honor the <base> tag, its href must be an absolute URL.
        if cls.is_absolute(slug):
            return slug
        parts = urlparse.urlparse(handler.request.url)
        return urlparse.urlunparse(
            (parts.scheme, parts.netloc, slug, None, None, None))

    def __init__(self, *args, **kwargs):
        super(ApplicationHandler, self).__init__(*args, **kwargs)
        # Accumulates values passed to the template on render.
        self.template_value = {}

    def get_template(self, template_file, additional_dirs=None):
        """Loads a template, populating namespace-wide template values."""
        values = self.template_value
        values[COURSE_INFO_KEY] = self.app_context.get_environ()
        values['is_course_admin'] = Roles.is_course_admin(self.app_context)
        values['is_super_admin'] = Roles.is_super_admin()
        values[COURSE_BASE_KEY] = self.get_base_href(self)
        locale = values[COURSE_INFO_KEY]['course']['locale']
        environ = self.app_context.get_template_environ(
            locale, additional_dirs)
        return environ.get_template(template_file)

    def canonicalize_url(self, location):
        """Adds the current namespace URL prefix to a relative 'location'."""
        if self.is_absolute(location):
            return location
        slug = self.app_context.get_slug()
        if slug and slug != '/':
            location = '%s%s' % (slug, location)
        return location

    def redirect(self, location):
        """Redirects to 'location' canonicalized for the current course."""
        super(ApplicationHandler, self).redirect(
            self.canonicalize_url(location))
class BaseHandler(ApplicationHandler):
    """Base handler for course pages; adds user/student helpers."""

    def __init__(self, *args, **kwargs):
        super(BaseHandler, self).__init__(*args, **kwargs)
        # Lazily-created Course instance; see get_course().
        self.course = None

    def get_course(self):
        """Lazily instantiates and caches the Course for this handler."""
        if not self.course:
            self.course = Course(self)
        return self.course

    def get_units(self):
        """Returns all units in the course."""
        return self.get_course().get_units()

    def get_lessons(self, unit_id):
        """Returns all lessons (in order) in the specific course unit."""
        return self.get_course().get_lessons(unit_id)

    def get_user(self):
        """Returns the signed-in user; redirects to login otherwise."""
        user = users.get_current_user()
        if user:
            return user
        self.redirect(users.create_login_url(self.request.uri))

    def personalize_page_and_get_user(self):
        """Adds personalized navbar fields when a user is signed in."""
        user = self.get_user()
        if user:
            self.template_value['email'] = user.email()
            self.template_value['logoutUrl'] = users.create_logout_url('/')
        return user

    def personalize_page_and_get_enrolled(self):
        """Returns the enrolled student, redirecting away otherwise."""
        user = self.personalize_page_and_get_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return None
        student = Student.get_enrolled_student_by_email(user.email())
        if not student:
            # Signed in but not enrolled: send to the course preview.
            self.redirect('/preview')
            return None
        return student

    def assert_xsrf_token_or_fail(self, request, action):
        """Returns True iff the request carries a valid XSRF token; else 403."""
        token = request.get('xsrf_token')
        if token and XsrfTokenManager.is_xsrf_token_valid(token, action):
            return True
        self.error(403)
        return False

    def render(self, template_file):
        """Renders a template against the accumulated template values."""
        self.response.out.write(
            self.get_template(template_file).render(self.template_value))
class BaseRESTHandler(BaseHandler):
    """Base handler for REST endpoints; reports XSRF failures as JSON."""

    def assert_xsrf_token_or_fail(self, token_dict, action, args_dict):
        """Returns True iff the token is valid; otherwise sends 403 JSON."""
        token = token_dict.get('xsrf_token')
        if token and XsrfTokenManager.is_xsrf_token_valid(token, action):
            return True
        transforms.send_json_response(
            self, 403,
            'Bad XSRF token. Please reload the page and try again',
            args_dict)
        return False
class PreviewHandler(BaseHandler):
    """Serves the course preview page."""

    def get(self):
        """Handles GET requests."""
        user = users.get_current_user()
        if user:
            self.template_value['email'] = user.email()
            self.template_value['logoutUrl'] = users.create_logout_url('/')
        else:
            self.template_value['loginUrl'] = users.create_login_url('/')
        self.template_value['navbar'] = {'course': True}
        self.template_value['units'] = self.get_units()
        # Enrolled students go straight to the course itself.
        if user and Student.get_enrolled_student_by_email(user.email()):
            self.redirect('/course')
        else:
            self.render('preview.html')
class RegisterHandler(BaseHandler):
    """Handler for course registration."""

    def get(self):
        """Shows the registration form to signed-in, unenrolled users."""
        user = self.personalize_page_and_get_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        if Student.get_enrolled_student_by_email(user.email()):
            # Already enrolled; no need to register again.
            self.redirect('/course')
            return
        self.template_value['navbar'] = {'registration': True}
        self.template_value['register_xsrf_token'] = (
            XsrfTokenManager.create_xsrf_token('register-post'))
        self.render('register.html')

    def post(self):
        """Processes a registration form submission."""
        user = self.personalize_page_and_get_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'register-post'):
            return
        can_register = self.app_context.get_environ(
            )['reg_form']['can_register']
        if can_register:
            # Create a new student record, or re-enroll a returning one.
            student = Student.get_by_email(user.email())
            if not student:
                student = Student(key_name=user.email())
            student.user_id = user.user_id()
            student.is_enrolled = True
            student.name = self.request.get('form01')
            student.put()
        else:
            self.template_value['course_status'] = 'full'
        # Render registration confirmation page.
        self.template_value['navbar'] = {'registration': True}
        self.render('confirmation.html')
class ForumHandler(BaseHandler):
    """Serves the course forum page to enrolled students."""

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if student:
            self.template_value['navbar'] = {'forum': True}
            self.render('forum.html')
class StudentProfileHandler(BaseHandler):
    """Handles the click to 'My Profile' link in the nav bar."""

    def get(self):
        """Handles GET requests."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        self.template_value.update({
            'navbar': {},
            'student': student,
            'scores': get_all_scores(student),
            'student_edit_xsrf_token':
                XsrfTokenManager.create_xsrf_token('student-edit'),
        })
        self.render('student_profile.html')
class StudentEditStudentHandler(BaseHandler):
    """Handles edits to student records by students."""

    def post(self):
        """Handles POST requests."""
        if not self.personalize_page_and_get_enrolled():
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'student-edit'):
            return
        Student.rename_current(self.request.get('name'))
        self.redirect('/student/home')
class StudentUnenrollHandler(BaseHandler):
    """Handler for students to unenroll themselves."""

    def get(self):
        """Shows the unenrollment confirmation check page."""
        student = self.personalize_page_and_get_enrolled()
        if not student:
            return
        self.template_value.update({
            'student': student,
            'navbar': {'registration': True},
            'student_unenroll_xsrf_token':
                XsrfTokenManager.create_xsrf_token('student-unenroll'),
        })
        self.render('unenroll_confirmation_check.html')

    def post(self):
        """Performs the unenrollment."""
        if not self.personalize_page_and_get_enrolled():
            return
        if not self.assert_xsrf_token_or_fail(self.request, 'student-unenroll'):
            return
        Student.set_enrollment_status_for_current(False)
        self.template_value['navbar'] = {'registration': True}
        self.render('unenroll_confirmation.html')
class XsrfTokenManager(object):
    """Provides XSRF protection via transient HMAC-signed action tokens.

    Tokens are not stored anywhere; validity is checked by recomputing the
    HMAC digest for the claimed issue time and comparing it to the token.
    """

    # Max age of the token (4 hours).
    XSRF_TOKEN_AGE_SECS = 60 * 60 * 4

    # Token delimiters.
    DELIMITER_PRIVATE = ':'
    DELIMITER_PUBLIC = '/'

    # Default user id used when no user is signed in.
    USER_ID_DEFAULT = 'default'

    @classmethod
    def _create_token(cls, action_id, issued_on):
        """Creates a string representation (digest) of a token.

        We use transient tokens to reduce datastore costs. The public token
        is 'issued_on/digest', where digest is an HMAC over the user id, the
        action and the issue time.

        Args:
            action_id: Name of the action the token authorizes.
            issued_on: Time (seconds since epoch) the token was issued.

        Returns:
            The token string.
        """
        # Look up the current user id; anonymous users share a fixed id.
        user = users.get_current_user()
        if user:
            user_id = user.user_id()
        else:
            user_id = cls.USER_ID_DEFAULT
        # Round time to whole seconds so the value round-trips as text.
        issued_on = long(issued_on)
        digester = hmac.new(str(XSRF_SECRET.value))
        digester.update(str(user_id))
        digester.update(cls.DELIMITER_PRIVATE)
        digester.update(str(action_id))
        digester.update(cls.DELIMITER_PRIVATE)
        digester.update(str(issued_on))
        digest = digester.digest()
        return '%s%s%s' % (
            issued_on, cls.DELIMITER_PUBLIC, base64.urlsafe_b64encode(digest))

    @classmethod
    def create_xsrf_token(cls, action):
        """Issues a new token for 'action' bound to the current user."""
        return cls._create_token(action, time.time())

    @classmethod
    def is_xsrf_token_valid(cls, token, action):
        """Validates a token by recomputing and comparing the digest.

        Any malformed input is treated as an invalid token.
        """
        try:
            parts = token.split(cls.DELIMITER_PUBLIC)
            if len(parts) != 2:
                raise Exception('Bad token format, expected: a/b.')
            issued_on = long(parts[0])
            if time.time() - issued_on > cls.XSRF_TOKEN_AGE_SECS:
                return False
            # A match proves the caller holds a token we minted for this
            # user, action and issue time.
            return cls._create_token(action, issued_on) == token
        except Exception:  # pylint: disable-msg=broad-except
            return False
| Python |
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @author: psimakov@google.com (Pavel Simakov)
"""Enables hosting of multiple courses in one application instance.
We used to allow hosting of only one course in one Google App Engine instance.
Now we allow hosting of many courses simultaneously. To configure multiple
courses one must set an environment variable in app.yaml file, for example:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/coursea:/courses/a, course:/courseb:/courses/b'
...
This variable holds a ',' separated list of rewrite rules. Each rewrite rule
has three ':' separated parts: the word 'course', the URL prefix, and the file
system location for the site files. The fourth, optional part, is a course
namespace name.
The URL prefix specifies, how will the course URL appear in the browser. In the
example above, the courses will be mapped to http://www.example.com[/coursea]
and http://www.example.com[/courseb].
The file system location of the files specifies, which files to serve for the
course. For each course we expect three sub-folders: 'assets', 'views', and
'data'. The 'data' folder must contain the CSV files that define the course
layout, the 'assets' and 'views' should contain the course specific files and
jinja2 templates respectively. In the example above, the course files are
expected to be placed into folders '/courses/a' and '/courses/b' of your Google
App Engine installation respectively.
By default Course Builder handles static '/assets' files using a custom
handler. You may choose to handle '/assets' files of your course as 'static'
files using Google App Engine handler. You can do so by creating a new static
file handler entry in your app.yaml and placing it before our main course
handler.
If you have an existing course developed using Course Builder and do NOT want
to host multiple courses, there is nothing for you to do. A following default
rule is silently created for you:
...
env_variables:
GCB_COURSES_CONFIG: 'course:/:/'
...
It sets the '/' as the base URL for the course, uses root folder of your Google
App Engine installation to look for course /assets/..., /data/..., and
/views/... and uses blank datastore and memcache namespace. All in all,
everything behaves just as it did in the prior version of Course Builder when
only one course was supported.
If you have an existing course developed using Course Builder and DO want to
hosting multiple courses here are the steps. First, define the courses
configuration environment variable as described above. Second, copy existing
'assets', 'data' and 'views' folders of your course into the new location, for
example '/courses/mycourse'.
If you have an existing course built on a previous version of Course Builder
and you now decided to use new URL prefix, which is not '/', you will need
to update your old course html template and JavaScript files. You typically
would have to make two modifications. First, replace all absolute URLs with
the relative URLs. For example, if you had <a href='/forum'>..</a>, you will
need to replace it with <a href='forum'>..</a>. Second, you need to add <base>
tag at the top of you course 'base.html' and 'base_registration.html' files,
like this:
...
<head>
<base href="{{ gcb_course_base }}" />
...
Current Course Builder release already has all these modifications.
Note, that each 'course' runs in a separate Google App Engine namespace. The
name of the namespace is derived from the course files location. In the example
above, the course files are stored in the folder '/courses/a', which will be mapped
to the namespace name 'gcb-courses-a'. The namespaces can't contain '/', so we
replace them with '-' and prefix the namespace with the project abbreviation
'gcb'. Remember these namespace names, you will need to use them if/when
accessing server administration panel, viewing objects in the datastore, etc.
Don't move the files to another folder after your course starts as a new folder
name will create a new namespace name and old data will no longer be used. You
are free to rename the course URL prefix at any time. Once again, if you are
not hosting multiple courses, your course will run in a default namespace
(None).
Good luck!
"""
import logging
import mimetypes
import os
import threading
import appengine_config
from models.counters import PerfCounter
from models.vfs import LocalReadOnlyFileSystem
import webapp2
from webapp2_extras import i18n
import yaml
from google.appengine.api import namespace_manager
from google.appengine.ext import zipserve
# the name of environment variable that holds rewrite rule definitions
GCB_COURSES_CONFIG_ENV_VAR_NAME = 'GCB_COURSES_CONFIG'
# base name for all course namespaces
GCB_BASE_COURSE_NAMESPACE = 'gcb-course'
# these folder and file names are reserved
GCB_ASSETS_FOLDER_NAME = os.path.normpath('/assets/')
GCB_VIEWS_FOLDER_NAME = os.path.normpath('/views/')
GCB_DATA_FOLDER_NAME = os.path.normpath('/data/')
GCB_CONFIG_FILENAME = os.path.normpath('/course.yaml')
# supported site types
SITE_TYPE_COURSE = 'course'
# default 'Cache-Control' HTTP header for static files (max-age in seconds)
DEFAULT_CACHE_CONTROL_MAX_AGE = 600
DEFAULT_CACHE_CONTROL_PUBLIC = 'public'
# enable debug output
DEBUG_INFO = False
# thread local storage for current request PATH_INFO
PATH_INFO_THREAD_LOCAL = threading.local()
# performance counters
STATIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-static',
    'A number of times request was served via static handler.')
DYNAMIC_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-dynamic',
    'A number of times request was served via dynamic handler.')
ZIP_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-zip',
    'A number of times request was served via zip handler.')
NO_HANDLER_COUNT = PerfCounter(
    'gcb-sites-handler-none',
    'A number of times request was not matched to any handler.')
HTTP_BYTES_IN = PerfCounter(
    'gcb-sites-bytes-in',
    'A number of bytes received from clients by the handler.')
HTTP_BYTES_OUT = PerfCounter(
    'gcb-sites-bytes-out',
    'A number of bytes sent out from the handler to clients.')
HTTP_STATUS_200 = PerfCounter(
    'gcb-sites-http-20x',
    'A number of times HTTP status code 20x was returned.')
HTTP_STATUS_300 = PerfCounter(
    'gcb-sites-http-30x',
    'A number of times HTTP status code 30x was returned.')
HTTP_STATUS_400 = PerfCounter(
    'gcb-sites-http-40x',
    'A number of times HTTP status code 40x was returned.')
HTTP_STATUS_500 = PerfCounter(
    'gcb-sites-http-50x',
    'A number of times HTTP status code 50x was returned.')
# maps a status-code class (the code rounded down to a multiple of 100)
# to its counter; used by count_stats()
COUNTER_BY_HTTP_CODE = {
    200: HTTP_STATUS_200, 300: HTTP_STATUS_300, 400: HTTP_STATUS_400,
    500: HTTP_STATUS_500}
def count_stats(handler):
    """Records statistics about the request and the response.

    Best-effort: any failure is logged and swallowed so that stats
    collection can never break request handling.

    Args:
        handler: The webapp2 handler whose request/response are counted.
    """
    try:
        # Record request bytes in.
        if handler.request and handler.request.content_length:
            HTTP_BYTES_IN.inc(handler.request.content_length)
        # Record response HTTP status code, rounded down to its class
        # (20x, 30x, ...).
        if handler.response and handler.response.status_int:
            rounded_status_code = (handler.response.status_int // 100) * 100
            # Use .get() so an unmapped class (e.g. 1xx) hits the explicit
            # error log below instead of raising KeyError into the broad
            # except clause.
            counter = COUNTER_BY_HTTP_CODE.get(rounded_status_code)
            if not counter:
                logging.error(
                    'Unknown HTTP status code: %s.',
                    handler.response.status_code)
            else:
                counter.inc()
        # Record response bytes out.
        if handler.response and handler.response.content_length:
            HTTP_BYTES_OUT.inc(handler.response.content_length)
    except Exception as e:  # pylint: disable-msg=broad-except
        logging.error('Failed to count_stats(): %s.', str(e))
def has_path_info():
    """Checks if PATH_INFO is defined for the thread local.

    Returns:
        True when set_path_info() has been called on this thread without a
        matching unset_path_info().
    """
    return hasattr(PATH_INFO_THREAD_LOCAL, 'path')
def set_path_info(path):
    """Stores PATH_INFO in thread local and enters the matching namespace.

    Args:
        path: The request PATH_INFO; must be non-empty and not already set.

    Raises:
        Exception: If 'path' is falsy or a path is already set on this
            thread.
    """
    if not path:
        raise Exception('Use \'unset()\' instead.')
    if has_path_info():
        raise Exception('Expected no path set.')
    PATH_INFO_THREAD_LOCAL.path = path
    # Remember the current namespace so unset_path_info() can restore it.
    PATH_INFO_THREAD_LOCAL.old_namespace = namespace_manager.get_namespace()
    namespace_manager.set_namespace(
        ApplicationContext.get_namespace_name_for_request())
def get_path_info():
    """Gets PATH_INFO from thread local; attribute error if never set."""
    return PATH_INFO_THREAD_LOCAL.path
def unset_path_info():
    """Removes PATH_INFO from thread local and restores the old namespace.

    Raises:
        Exception: If no path is currently set on this thread.
    """
    if not has_path_info():
        raise Exception('Expected valid path already set.')
    # Restore the namespace captured by set_path_info().
    namespace_manager.set_namespace(
        PATH_INFO_THREAD_LOCAL.old_namespace)
    del PATH_INFO_THREAD_LOCAL.old_namespace
    del PATH_INFO_THREAD_LOCAL.path
def debug(message):
    """Logs 'message' at INFO level when DEBUG_INFO is enabled."""
    if not DEBUG_INFO:
        return
    logging.info(message)
def make_default_rule():
    """Builds the implicit rule: one course at '/' in the None namespace."""
    default_context = ApplicationContext(
        'course', '/', '/', appengine_config.DEFAULT_NAMESPACE_NAME)
    return default_context
def get_all_courses():
    """Reads all course rewrite rule definitions from environment variable.

    Returns:
        A list of ApplicationContext objects, one per rule; a single default
        context when the variable is unset or empty.
    """
    var_string = os.environ.get(GCB_COURSES_CONFIG_ENV_VAR_NAME)
    if not var_string:
        return [make_default_rule()]
    slugs = {}
    namespaces = {}
    all_contexts = []
    for rule in var_string.split(','):
        rule = rule.strip()
        if not rule:
            continue
        parts = rule.split(':')
        # A rule needs at least a type, a slug and a folder.
        if len(parts) < 3:
            raise Exception('Expected rule definition of the form '
                            ' \'type:slug:folder[:ns]\', got %s: ' % rule)
        site_type, slug, folder = parts[0], parts[1], parts[2]
        # Only 'course' rules are supported.
        if site_type != SITE_TYPE_COURSE:
            raise Exception('Expected \'%s\', found: \'%s\'.'
                            % (SITE_TYPE_COURSE, site_type))
        # Slugs must be unique across all rules.
        if slug in slugs:
            raise Exception('Slug already defined: %s.' % slug)
        slugs[slug] = True
        # Use the explicit namespace when given, else derive one from the
        # folder name ('/c/d' -> 'gcb-course-c-d').
        if len(parts) == 4:
            namespace = parts[3]
        elif folder and folder != '/':
            namespace = '%s%s' % (GCB_BASE_COURSE_NAMESPACE,
                                  folder.replace('/', '-'))
        else:
            namespace = appengine_config.DEFAULT_NAMESPACE_NAME
        if namespace in namespaces:
            raise Exception('Namespace already defined: %s.' % namespace)
        namespaces[namespace] = True
        all_contexts.append(ApplicationContext(
            site_type, slug, folder, namespace))
    return all_contexts
def get_course_for_current_request():
    """Chooses the course whose slug matches the current request path."""
    if not has_path_info():
        return None
    path = get_path_info()
    # TODO(psimakov): linear search is unacceptable
    for course in get_all_courses():
        slug = course.get_slug()
        if slug == '/' or path == slug or path.startswith('%s/' % slug):
            return course
    debug('No mapping for: %s' % path)
    return None
def path_join(base, path):
    """Joins 'base' and 'path', always treating 'path' as relative.

    Unlike os.path.join(), an absolute 'path' does not replace 'base':
    os.path.join('/a/b', '/c') yields '/c', but this function yields
    '/a/b/c'.

    Args:
        base: The base path.
        path: The path to append to base; this is treated as a relative path.

    Returns:
        The path obtained by appending 'path' to 'base'.
    """
    if os.path.isabs(path):
        # Strip the drive letter (Windows) and the leading separator so the
        # remainder is relative to 'base'.
        path = os.path.splitdrive(path)[1][1:]
    return os.path.join(base, path)
def abspath(home_folder, filename):
    """Creates an absolute URL for a filename in a home folder."""
    relative = path_join(home_folder, filename)
    return path_join(appengine_config.BUNDLE_ROOT, relative)
def unprefix(path, prefix):
    """Removes 'prefix' from 'path'; returns '/' if the result is empty.

    Raises:
        Exception: If 'path' does not start with 'prefix'.
    """
    if not path.startswith(prefix):
        raise Exception('Not prefixed.')
    if prefix == '/':
        return path
    return path[len(prefix):] or '/'
def set_static_resource_cache_control(handler):
    """Properly sets Cache-Control for a WebOb/webapp2 response."""
    cache_control = handler.response.cache_control
    cache_control.no_cache = None
    cache_control.public = DEFAULT_CACHE_CONTROL_PUBLIC
    cache_control.max_age = DEFAULT_CACHE_CONTROL_MAX_AGE
def make_zip_handler(zipfilename):
    """Creates a handler class that serves files from the given zip file."""

    class CustomZipHandler(zipserve.ZipHandler):
        """ZipHandler subclass that applies our static caching policy."""

        def get(self, name):
            """Serves 'name' out of the bound zip file."""
            ZIP_HANDLER_COUNT.inc()
            self.ServeFromZipFile(zipfilename, name)
            count_stats(self)

        def SetCachingHeaders(self):  # pylint: disable=C6409
            """Overrides zipserve caching with our Cache-Control settings."""
            set_static_resource_cache_control(self)

    return CustomZipHandler
class AssetHandler(webapp2.RequestHandler):
    """Handles serving of static resources located on the file system."""

    def __init__(self, app_context, filename):
        # request/response are wired up by the dispatcher, not by webapp2.
        self.app_context = app_context
        self.filename = filename

    def get_mime_type(self, filename, default='application/octet-stream'):
        """Guesses a MIME type from the filename, with a binary fallback."""
        return mimetypes.guess_type(filename)[0] or default

    def get(self):
        """Handles GET requests."""
        debug('File: %s' % self.filename)
        if not self.app_context.fs.isfile(self.filename):
            self.error(404)
            return
        set_static_resource_cache_control(self)
        self.response.headers['Content-Type'] = self.get_mime_type(
            self.filename)
        stream = self.app_context.fs.open(self.filename)
        self.response.write(stream.read())
class ApplicationContext(object):
    """An application context for a request/response."""

    @classmethod
    def get_namespace_name_for_request(cls):
        """Gets the name of the namespace to use for this request.

        (Examples of such namespaces are NDB and memcache.)

        Returns:
            The namespace of the course matching the current request context
            path, or the default namespace when no course matches.
        """
        course = get_course_for_current_request()
        if not course:
            return appengine_config.DEFAULT_NAMESPACE_NAME
        return course.namespace

    @classmethod
    def after_create(cls, instance):
        """Override this method to manipulate freshly created instance."""
        pass

    def __init__(self, site_type, slug, homefolder, namespace, fs=None):
        """Creates new application context.

        Args:
            site_type: Specifies the type of context. Must be 'course' for
                now.
            slug: A common context path prefix for all URLs in the context.
            homefolder: A folder with the assets belonging to this context.
            namespace: A name of a datastore namespace for use by this
                context.
            fs: A file system object to be used for accessing homefolder;
                defaults to a local read-only file system.
        """
        self.type = site_type
        self.slug = slug
        self.homefolder = homefolder
        self.namespace = namespace
        self._fs = fs if fs else LocalReadOnlyFileSystem()
        self.after_create(self)

    @property
    def fs(self):
        """The file system backing this context."""
        return self._fs

    def get_namespace_name(self):
        return self.namespace

    def get_home_folder(self):
        return self.homefolder

    def get_slug(self):
        return self.slug

    def get_config_filename(self):
        """Returns absolute location of a course configuration file."""
        filename = abspath(self.get_home_folder(), GCB_CONFIG_FILENAME)
        debug('Config file: %s' % filename)
        return filename

    def get_environ(self):
        """Returns a dict of course configuration variables."""
        course_data_filename = self.get_config_filename()
        try:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary tags; presumably acceptable because course.yaml is
            # author-controlled — confirm before exposing to untrusted input.
            return yaml.load(self.fs.open(course_data_filename))
        except Exception:
            logging.info('Error: course.yaml file at %s not accessible',
                         course_data_filename)
            raise

    def get_template_home(self):
        """Returns absolute location of a course template folder."""
        path = abspath(self.get_home_folder(), GCB_VIEWS_FOLDER_NAME)
        debug('Template home: %s' % path)
        return path

    def get_data_home(self):
        """Returns absolute location of a course data folder."""
        path = abspath(self.get_home_folder(), GCB_DATA_FOLDER_NAME)
        debug('Data home: %s' % path)
        return path

    def get_template_environ(self, locale, additional_dirs):
        """Creates and configures a jinja template evaluation environment."""
        dirs = [self.get_template_home()]
        if additional_dirs:
            dirs += additional_dirs
        jinja_environment = self.fs.get_jinja_environ(dirs)
        i18n.get_i18n().set_locale(locale)
        jinja_environment.install_gettext_translations(i18n)
        return jinja_environment
class ApplicationRequestHandler(webapp2.RequestHandler):
    """Handles dispatching of all URL's to proper handlers."""

    @classmethod
    def bind_to(cls, urls, urls_map):
        """Recursively builds a map from a list of (URL, Handler) tuples."""
        for url in urls:
            path_prefix = url[0]
            handler = url[1]
            urls_map[path_prefix] = handler
            # Include any child routes the handler exposes.
            if hasattr(handler, 'get_child_routes'):
                cls.bind_to(handler.get_child_routes(), urls_map)

    @classmethod
    def bind(cls, urls):
        """Binds a list of (URL, Handler) tuples to this dispatcher class."""
        urls_map = {}
        cls.bind_to(urls, urls_map)
        cls.urls_map = urls_map

    def get_handler(self):
        """Finds a course suitable for handling this request."""
        course = get_course_for_current_request()
        if not course:
            return None
        path = get_path_info()
        if not path:
            return None
        return self.get_handler_for_course_type(
            course, unprefix(path, course.get_slug()))

    def get_handler_for_course_type(self, context, path):
        """Gets the right handler for the given context and path.

        Args:
            context: The ApplicationContext this request maps to.
            path: The request path with the course slug removed.

        Returns:
            A handler instance wired to this request/response, or None when
            nothing matches.
        """
        norm_path = os.path.normpath(path)
        # Static assets are served directly off the file system.
        if norm_path.startswith(GCB_ASSETS_FOLDER_NAME):
            abs_file = abspath(context.get_home_folder(), norm_path)
            handler = AssetHandler(self, abs_file)
            handler.request = self.request
            handler.response = self.response
            handler.app_context = context
            debug('Course asset: %s' % abs_file)
            STATIC_HANDLER_COUNT.inc()
            return handler
        # Everything else goes through the bound dynamic handlers.
        if path in ApplicationRequestHandler.urls_map:
            factory = ApplicationRequestHandler.urls_map[path]
            handler = factory()
            handler.app_context = context
            handler.request = self.request
            handler.response = self.response
            debug('Handler: %s > %s' % (path, handler.__class__.__name__))
            DYNAMIC_HANDLER_COUNT.inc()
            return handler
        NO_HANDLER_COUNT.inc()
        return None

    def _dispatch(self, path, verb):
        """Shared dispatch: routes one HTTP 'verb' request for 'path'.

        Sets up thread-local path/namespace state, finds the matched
        handler, invokes its 'verb' method (404 when none matches), and
        always records stats and tears the state down.
        """
        try:
            set_path_info(path)
            handler = self.get_handler()
            if not handler:
                self.error(404)
            else:
                getattr(handler, verb)()
        finally:
            count_stats(self)
            unset_path_info()

    def get(self, path):
        self._dispatch(path, 'get')

    def post(self, path):
        self._dispatch(path, 'post')

    def put(self, path):
        self._dispatch(path, 'put')

    def delete(self, path):
        self._dispatch(path, 'delete')
def assert_mapped(src, dest):
    """Asserts that path 'src' maps to the course with slug 'dest'."""
    try:
        set_path_info(src)
        course = get_course_for_current_request()
        if not dest:
            assert course is None
        else:
            assert course.get_slug() == dest
    finally:
        # Always tear down the thread-local path, even on assertion failure.
        unset_path_info()
def assert_handled(src, target_handler):
    """Asserts 'src' is handled by 'target_handler'; returns the handler."""
    try:
        set_path_info(src)
        handler = ApplicationRequestHandler().get_handler()
        if not (handler is None and target_handler is None):
            assert isinstance(handler, target_handler)
            return handler
        return None
    finally:
        unset_path_info()
def assert_fails(func):
    """Asserts that calling 'func' raises an exception."""
    try:
        func()
    except Exception:  # pylint: disable=W0703
        return
    # func() completed without raising: that is the failure case here.
    raise Exception()
def test_unprefix():
    """Verifies unprefix() slug-stripping behavior."""
    cases = [
        ('/', '/', '/'),
        ('/a/b/c', '/a/b', '/c'),
        ('/a/b/index.html', '/a/b', '/index.html'),
        ('/a/b', '/a/b', '/'),
    ]
    for path, prefix, expected in cases:
        assert unprefix(path, prefix) == expected
def test_rule_definitions():
    """Test various rewrite rule definitions."""
    # Check that the default site is created when no rules are specified.
    assert len(get_all_courses()) == 1
    # Test that empty definition is ok.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = ''
    assert len(get_all_courses()) == 1
    # Test one rule parsing.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'course:/google/pswg:/sites/pswg')
    rules = get_all_courses()
    assert len(get_all_courses()) == 1
    rule = rules[0]
    assert rule.get_slug() == '/google/pswg'
    assert rule.get_home_folder() == '/sites/pswg'
    # Test two rule parsing.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'course:/a/b:/c/d, course:/e/f:/g/h')
    assert len(get_all_courses()) == 2
    # Test that two of the same slugs are not allowed.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'foo:/a/b:/c/d, bar:/a/b:/c/d')
    assert_fails(get_all_courses)
    # Test that only 'course' is supported.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'foo:/a/b:/c/d, bar:/e/f:/g/h')
    assert_fails(get_all_courses)
    # Cleanup.
    del os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME]
    # Test that a namespace is derived from the folder name:
    # '/c/d' -> 'gcb-course-c-d'.
    set_path_info('/')
    try:
        os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = 'course:/:/c/d'
        assert ApplicationContext.get_namespace_name_for_request() == (
            'gcb-course-c-d')
    finally:
        unset_path_info()
def test_url_to_rule_mapping():
    """Tests mapping of a URL to a rule.

    BUG FIX: the '/a/b/c' assertion was duplicated (copy/paste); the
    redundant call is removed.
    """
    # default mapping
    assert_mapped('/favicon.ico', '/')
    assert_mapped('/assets/img/foo.png', '/')
    # explicit mapping
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'course:/a/b:/c/d, course:/e/f:/g/h')
    assert_mapped('/a/b', '/a/b')
    assert_mapped('/a/b/', '/a/b')
    assert_mapped('/a/b/c', '/a/b')
    assert_mapped('/e/f', '/e/f')
    assert_mapped('/e/f/assets', '/e/f')
    assert_mapped('/e/f/views', '/e/f')
    # Relative and unknown paths fall through to no rule at all.
    assert_mapped('e/f', None)
    assert_mapped('foo', None)
    # Cleanup.
    del os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME]
def test_url_to_handler_mapping_for_course_type():
    """Tests mapping of a URL to a handler for course type."""
    # setup rules
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'course:/a/b:/c/d, course:/e/f:/g/h')
    # Three distinct helper classes so isinstance checks can tell the
    # bound handlers apart.
    class FakeHandler0(object):
        def __init__(self):
            self.app_context = None
    class FakeHandler1(object):
        def __init__(self):
            self.app_context = None
    class FakeHandler2(object):
        def __init__(self):
            self.app_context = None
    # Setup handler.
    handler0 = FakeHandler0
    handler1 = FakeHandler1
    handler2 = FakeHandler2
    urls = [('/', handler0), ('/foo', handler1), ('/bar', handler2)]
    ApplicationRequestHandler.bind(urls)
    # Test proper handler mappings.
    assert_handled('/a/b', FakeHandler0)
    assert_handled('/a/b/', FakeHandler0)
    assert_handled('/a/b/foo', FakeHandler1)
    assert_handled('/a/b/bar', FakeHandler2)
    # Test assets mapping.
    handler = assert_handled('/a/b/assets/img/foo.png', AssetHandler)
    assert os.path.normpath(handler.app_context.get_template_home()).endswith(
        os.path.normpath('/coursebuilder/c/d/views'))
    # This is allowed as we don't go out of /assets/...
    handler = assert_handled(
        '/a/b/assets/foo/../models/models.py', AssetHandler)
    assert os.path.normpath(handler.filename).endswith(
        os.path.normpath('/coursebuilder/c/d/assets/models/models.py'))
    # This is not allowed as we do go out of /assets/...
    assert_handled('/a/b/assets/foo/../../models/models.py', None)
    # Test negative cases
    assert_handled('/foo', None)
    assert_handled('/baz', None)
    # Site 'views' and 'data' are not accessible
    assert_handled('/a/b/view/base.html', None)
    assert_handled('/a/b/data/units.csv', None)
    # Default mapping
    del os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME]
    urls = [('/', handler0), ('/foo', handler1), ('/bar', handler2)]
    # BUG FIX: this section rebuilt 'urls' but never re-bound it; the earlier
    # binding just happened to be identical, masking the omission.  Bind
    # explicitly so the section stands on its own.
    ApplicationRequestHandler.bind(urls)
    # Positive cases
    assert_handled('/', FakeHandler0)
    assert_handled('/foo', FakeHandler1)
    assert_handled('/bar', FakeHandler2)
    handler = assert_handled('/assets/js/main.js', AssetHandler)
    assert os.path.normpath(handler.app_context.get_template_home()).endswith(
        os.path.normpath('/coursebuilder/views'))
    # Negative cases
    assert_handled('/favicon.ico', None)
    assert_handled('/e/f/index.html', None)
    assert_handled('/foo/foo.css', None)
    # Clean up.
    ApplicationRequestHandler.bind([])
def test_special_chars():
    """Test special character encoding."""
    # Test that namespace collisions are detected and are not allowed.
    # '/c/d' and '/c-d' presumably both map to the same derived namespace
    # ('gcb-course-c-d', per test_rule_definitions) -- the parser must
    # reject the pair.
    os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME] = (
        'foo:/a/b:/c/d, bar:/a/b:/c-d')
    assert_fails(get_all_courses)
    # Cleanup.
    del os.environ[GCB_COURSES_CONFIG_ENV_VAR_NAME]
def test_path_construction():
    """Checks that path_join() works correctly."""
    def check(base, tail, expected):
        # Normalise both sides so the comparison is platform-neutral.
        assert os.path.normpath(path_join(base, tail)) == (
            os.path.normpath(expected))
    # Test cases common to all platforms.
    check('/a/b', '/c', '/a/b/c')
    check('/a/b/', '/c', '/a/b/c')
    check('/a/b', 'c', '/a/b/c')
    check('/a/b/', 'c', '/a/b/c')
    # Windows-specific test cases: only meaningful where drive letters
    # are recognised by splitdrive().
    drive, unused_path = os.path.splitdrive('c:\\windows')
    if drive:
        check('/a/b', 'c:/d', '/a/b/d')
        check('/a/b/', 'c:/d', '/a/b/d')
def run_all_unit_tests():
    """Runs every unit test in this module, in a fixed order."""
    for test in (test_special_chars,
                 test_unprefix,
                 test_rule_definitions,
                 test_url_to_rule_mapping,
                 test_url_to_handler_mapping_for_course_type,
                 test_path_construction):
        test()
# Running this module directly executes its self-tests.  DEBUG_INFO is
# presumably a module-level flag read by the handlers above to enable
# verbose output -- confirm against the rest of the module.
if __name__ == '__main__':
    DEBUG_INFO = True
    run_all_unit_tests()
| Python |
#====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
#
# This software consists of voluntary contributions made by many
# individuals on behalf of the Apache Software Foundation. For more
# information on the Apache Software Foundation, please see
# <http://www.apache.org/>.
#
import os
import re
import tempfile
import shutil

# Directory names that must never be descended into.
# BUG FIX: the pattern previously used an unescaped '.', so any directory
# with 'svn' in characters 2-4 (e.g. 'xsvn') was also skipped; '\.' pins it
# to the literal '.svn'.  Raw strings avoid invalid-escape warnings.
ignore_pattern = re.compile(r'^(\.svn|target|bin|classes)')
# Java sources only.  The trailing '$' stops the unanchored original from
# also matching names such as 'Foo.javax'.
java_pattern = re.compile(r'^.*\.java$')
# Import lines to rewrite from org.apache.http.annotation to JCIP.
annot_pattern = re.compile(r'import org\.apache\.http\.annotation\.')
def process_dir(dir):
    """Recursively walk *dir*, rewriting every Java source found.

    Subtrees whose name matches ignore_pattern (VCS/build output) are
    skipped entirely.
    """
    for entry in os.listdir(dir):
        path = os.path.join(dir, entry)
        if os.path.isdir(path):
            if not ignore_pattern.match(entry):
                process_dir(path)
        elif java_pattern.match(entry):
            process_source(path)
def process_source(filename):
    """Rewrite *filename* in place, replacing org.apache.http.annotation
    imports with their net.jcip equivalents.

    The rewrite goes through a temp file and only replaces the original
    when at least one import line actually changed.
    """
    tmpfd, tmpfile = tempfile.mkstemp()
    try:
        changed = False
        with os.fdopen(tmpfd, 'w') as dst:
            with open(filename) as src:
                for line in src:
                    if annot_pattern.match(line):
                        changed = True
                        line = line.replace('import org.apache.http.annotation.', 'import net.jcip.annotations.')
                    dst.write(line)
        if changed:
            shutil.move(tmpfile, filename)
        else:
            os.remove(tmpfile)
    except:
        # Clean up the temp file, but re-raise: the original bare
        # 'except: pass' silently swallowed every failure (even
        # KeyboardInterrupt), leaving no trace of unprocessed files.
        os.remove(tmpfile)
        raise
# Script entry point: rewrite all Java sources under the current directory.
process_dir('.')
| Python |
#!/usr/bin/env python
# Packaging script for the feedformatter module.
from setuptools import setup
from feedformatter import __version__ as version
setup(
    name='feedformatter',
    version=version,
    description='A Python library for generating news feeds in RSS and Atom formats',
    author='Luke Maurits',
    author_email='luke@maurits.id.au',
    # BUG FIX: the URL used to point at the unrelated 'feedparser' project.
    url='http://code.google.com/p/feedformatter/',
    license='http://www.luke.maurits.id.au/software/bsdlicense.txt',
    py_modules=['feedformatter'],
)
| Python |
#!/usr/bin/env python
# Packaging script for the feedformatter module.
from setuptools import setup
from feedformatter import __version__ as version
setup(
    name='feedformatter',
    version=version,
    description='A Python library for generating news feeds in RSS and Atom formats',
    author='Luke Maurits',
    author_email='luke@maurits.id.au',
    # BUG FIX: the URL used to point at the unrelated 'feedparser' project.
    url='http://code.google.com/p/feedformatter/',
    license='http://www.luke.maurits.id.au/software/bsdlicense.txt',
    py_modules=['feedformatter'],
)
| Python |
# Feedformatter
# Copyright (c) 2008, Luke Maurits <luke@maurits.id.au>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "TRUNK"

from cStringIO import StringIO

# This "staircase" of import attempts is ugly. If there's a nicer way to do
# this, please let me know!
try:
    import xml.etree.cElementTree as ET
except ImportError:
    try:
        import xml.etree.ElementTree as ET
    except ImportError:
        try:
            import cElementTree as ET
        except ImportError:
            try:
                from elementtree import ElementTree as ET
            except ImportError:
                raise ImportError("Could not import any form of element tree!")

try:
    # Optional PyXML support for pretty printing.
    from xml.dom.ext import PrettyPrint
    from xml.dom.ext.reader.Sax import FromXml
    feedformatterCanPrettyPrint = True
except ImportError:
    feedformatterCanPrettyPrint = False

# BUG FIX: strptime was missing from this import even though
# _convert_datetime calls it, so every string-timestamp conversion died
# with a NameError instead of parsing.
from time import time, strftime, strptime, localtime, mktime, struct_time, timezone
# RSS 1.0 Functions ----------
_rss1_channel_mappings = (
(("title",), "title"),
(("link", "url"), "link"),
(("description", "desc", "summary"), "description")
)
_rss1_item_mappings = (
(("title",), "title"),
(("link", "url"), "link"),
(("description", "desc", "summary"), "description")
)
# RSS 2.0 Functions ----------
_rss2_channel_mappings = (
(("title",), "title"),
(("link", "url"), "link"),
(("description", "desc", "summary"), "description"),
(("pubDate", "pubdate", "date", "published", "updated"), "pubDate", lambda(x): _format_datetime("rss2",x)),
(("category",), "category"),
(("language",), "language"),
(("copyright",), "copyright"),
(("webMaster",), "webmaster"),
(("image",), "image"),
(("skipHours",), "skipHours"),
(("skipDays",), "skipDays")
)
_rss2_item_mappings = (
(("title",), "title"),
(("link", "url"), "link"),
(("description", "desc", "summary"), "description"),
(("guid", "id"), "guid"),
(("pubDate", "pubdate", "date", "published", "updated"), "pubDate", lambda(x): _format_datetime("rss2",x)),
(("category",), "category"),
(("author",), "author", lambda(x): _rssify_author(x))
)
# Atom 1.0 ----------
_atom_feed_mappings = (
(("title",), "title"),
(("link", "url"), "id"),
(("description", "desc", "summary"), "subtitle"),
(("pubDate", "pubdate", "date", "published", "updated"), "updated", lambda(x): _format_datetime("atom",x)),
(("category",), "category"),
(("author",), "author", lambda(x): _atomise_author(x))
)
_atom_item_mappings = (
(("title",), "title"),
(("link", "url"), "id"),
(("link", "url"), "link", lambda(x): _atomise_link(x)),
(("description", "desc", "summary"), "summary"),
(("pubDate", "pubdate", "date", "published", "updated"), "updated", lambda(x): _format_datetime("atom",x)),
(("category",), "category"),
(("author",), "author", lambda(x): _atomise_author(x))
)
def _get_tz_offset():
"""
Return the current timezone's offset from GMT as a string
in the format +/-HH:MM, as required by RFC3339.
"""
seconds = -1*timezone # Python gets the offset backward! >:(
minutes = seconds/60
hours = minutes/60
minutes = minutes - hours*60
if seconds < 0:
return "-%02d:%d" % (hours, minutes)
else:
return "+%02d:%d" % (hours, minutes)
def _convert_datetime(time):
"""
Convert time, which may be one of a whole lot of things, into a
standard 9 part time tuple.
"""
if (type(time) is tuple and len(time) ==9) or type(time) is struct_time:
# Already done!
return time
elif type(time) is int or type(time) is float:
# Assume this is a seconds-since-epoch time
return localtime(time)
elif type(time) is str:
if time.isalnum():
# String is alphanumeric - a time stamp?
try:
return strptime(time, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
raise Exception("Unrecongised time format!")
else:
# Maybe this is a string of an epoch time?
try:
return localtime(float(time))
except ValueError:
# Guess not.
raise Exception("Unrecongised time format!")
else:
# No idea what this is. Give up!
raise Exception("Unrecongised time format!")
def _format_datetime(feed_type, time):
    """
    Convert some representation of a date and time into a string which can be
    used in a validly formatted feed of type feed_type ("rss2" or "atom").
    Raise an Exception if this cannot be done.
    """
    # First, convert time into a time structure
    time = _convert_datetime(time)
    # Then, convert that to the appropriate string.  BUG FIX: the strings
    # were compared with 'is', which only worked because CPython happens to
    # intern short literals; '==' is the guaranteed comparison.
    if feed_type == "rss2":
        return strftime("%a, %d %b %Y %H:%M:%S %Z", time)
    elif feed_type == "atom":
        return strftime("%Y-%m-%dT%H:%M:%S", time) + _get_tz_offset()
def _atomise_link(link):
if type(link) is dict:
return dict
else:
return {"href" : link}
def _atomise_author(author):
"""
Convert author from whatever it is to a dictionary representing an
atom:Person construct.
"""
if type(author) is dict:
return author
else:
if author.startswith("http://") or author.startswith("www"):
# This is clearly a URI
return {"uri" : author}
elif "@" in author and "." in author:
# This is most probably an email address
return {"email" : author}
else:
# Must be a name
return {"name" : author}
def _rssify_author(author):
"""
Convert author from whatever it is to a plain old email string for
use in an RSS 2.0 feed.
"""
if type(author) is dict:
try:
return author["email"]
except KeyError:
return None
else:
if "@" in author and "." in author:
# Probably an email address
return author
else:
return None
def _add_subelems(root_element, mappings, dictionary):
    """
    Add one subelement to root_element for each key in dictionary which is
    supported by a mapping in mappings.  Only the first matching key of a
    mapping is used; an optional third mapping entry converts the value.
    """
    for mapping in mappings:
        keys, elem_name = mapping[0], mapping[1]
        convert = mapping[2] if len(mapping) == 3 else None
        for key in keys:
            if key not in dictionary:
                continue
            raw = dictionary[key]
            _add_subelem(root_element, elem_name,
                         raw if convert is None else convert(raw))
            break
def _add_subelem(root_element, name, value):
if value is None:
return
if type(value) is dict:
### HORRIBLE HACK!
if name=="link":
ET.SubElement(root_element, name, href=value["href"])
else:
subElem = ET.SubElement(root_element, name)
for key in value:
_add_subelem(subElem, key, value[key])
else:
ET.SubElement(root_element, name).text = value
def _stringify(tree, pretty):
"""
Turn an ElementTree into a string, optionally with line breaks and indentation.
"""
if pretty and feedformatterCanPrettyPrint:
string = StringIO()
doc = FromXml(ET.tostring(tree))
PrettyPrint(doc,string,indent=" ")
return string.getvalue()
else:
return ET.tostring(tree)
class Feed(object):
    """A feed of items, formattable as RSS 1.0, RSS 2.0 or Atom 1.0.

    Channel/feed metadata lives in the self.feed dictionary; each item/entry
    is a dictionary in the self.items list (self.entries is an alias).

    BUG FIX: declared as a new-style class -- the bare 'class Feed:' was an
    old-style class under Python 2, inconsistent with the companion module's
    'class Feed(object)'.
    """

    ### INTERNAL METHODS ------------------------------

    def __init__(self, feed=None, items=None):
        if feed:
            self.feed = feed
        else:
            self.feed = {}
        if items:
            self.items = items
        else:
            self.items = []
        # Alias: 'entries' (Atom terminology) is the same list object.
        self.entries = self.items

    ### RSS 1.0 STUFF ------------------------------

    def validate_rss1(self):
        """Raise an InvalidFeedException if the feed cannot be validly
        formatted as RSS 1.0."""
        # <channel> must contain "title"
        if "title" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                "RSS 1.0 feed must contain a title subelement")
        # <channel> must contain "link"
        if "link" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                " RSS 1.0 feeds must contain a link subelement")
        # <channel> must contain "description"
        if "description" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                "RSS 1.0 feeds must contain a description subelement")
        # Each <item> must contain "title" and "link"
        for item in self.items:
            if "title" not in item:
                raise InvalidFeedException("Each item element in an RSS 1.0 "
                    "feed must contain a title subelement")
            if "link" not in item:
                raise InvalidFeedException("Each item element in an RSS 1.0 "
                    "feed must contain a link subelement")

    def format_rss1_string(self, validate=True, pretty=False):
        """Format the feed as RSS 1.0 and return the result as a string."""
        if validate:
            self.validate_rss1()
        RSS1root = ET.Element( 'rdf:RDF',
            {"xmlns:rdf" : "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
             "xmlns" : "http://purl.org/rss/1.0/"} )
        RSS1channel = ET.SubElement(RSS1root, 'channel',
            {"rdf:about" : self.feed["link"]})
        _add_subelems(RSS1channel, _rss1_channel_mappings, self.feed)
        # RSS 1.0 requires an rdf:Seq listing every item resource.
        RSS1contents = ET.SubElement(RSS1channel, 'items')
        RSS1contents_seq = ET.SubElement (RSS1contents, 'rdf:Seq')
        for item in self.items:
            ET.SubElement(RSS1contents_seq, 'rdf:li', resource=item["link"])
        for item in self.items:
            RSS1item = ET.SubElement (RSS1root, 'item',
                {"rdf:about" : item["link"]})
            _add_subelems(RSS1item, _rss1_item_mappings, item)
        return _stringify(RSS1root, pretty=pretty)

    def format_rss1_file(self, filename, validate=True, pretty=False):
        """Format the feed as RSS 1.0 and save the result to a file."""
        string = self.format_rss1_string(validate, pretty)
        fp = open(filename, "w")
        fp.write(string)
        fp.close()

    ### RSS 2.0 STUFF ------------------------------

    def validate_rss2(self):
        """Raise an InvalidFeedException if the feed cannot be validly
        formatted as RSS 2.0."""
        # <channel> must contain "title"
        if "title" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                "RSS 2.0 feed must contain a title subelement")
        # <channel> must contain "link"
        if "link" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                " RSS 2.0 feeds must contain a link subelement")
        # <channel> must contain "description"
        if "description" not in self.feed:
            raise InvalidFeedException("The channel element of an "
                "RSS 2.0 feeds must contain a description subelement")
        # Each <item> must contain at least "title" OR "description"
        for item in self.items:
            if not ("title" in item or "description" in item):
                raise InvalidFeedException("Each item element in an RSS 2.0 "
                    "feed must contain at least a title or description subelement")

    def format_rss2_string(self, validate=True, pretty=False):
        """Format the feed as RSS 2.0 and return the result as a string."""
        if validate:
            self.validate_rss2()
        RSS2root = ET.Element( 'rss', {'version':'2.0'} )
        RSS2channel = ET.SubElement( RSS2root, 'channel' )
        _add_subelems(RSS2channel, _rss2_channel_mappings, self.feed)
        for item in self.items:
            RSS2item = ET.SubElement ( RSS2channel, 'item' )
            _add_subelems(RSS2item, _rss2_item_mappings, item)
        return _stringify(RSS2root, pretty=pretty)

    def format_rss2_file(self, filename, validate=True, pretty=False):
        """Format the feed as RSS 2.0 and save the result to a file."""
        string = self.format_rss2_string(validate, pretty)
        fp = open(filename, "w")
        fp.write(string)
        fp.close()

    ### ATOM STUFF ------------------------------

    def validate_atom(self):
        """Raise an InvalidFeedException if the feed cannot be validly
        formatted as Atom 1.0."""
        # Must have at least one "author" element in "feed" OR at least
        # "author" element in each "entry".
        if "author" not in self.feed:
            for entry in self.entries:
                if "author" not in entry:
                    raise InvalidFeedException("Atom feeds must have either at "
                        "least one author element in the feed element or at least "
                        " one author element in each entry element")

    def format_atom_string(self, validate=True, pretty=False):
        """Format the feed as Atom 1.0 and return the result as a string."""
        if validate:
            self.validate_atom()
        AtomRoot = ET.Element( 'feed', {"xmlns":"http://www.w3.org/2005/Atom"} )
        _add_subelems(AtomRoot, _atom_feed_mappings, self.feed)
        for entry in self.entries:
            AtomItem = ET.SubElement ( AtomRoot, 'entry' )
            _add_subelems(AtomItem, _atom_item_mappings, entry)
        return _stringify(AtomRoot, pretty=pretty)

    def format_atom_file(self, filename, validate=True, pretty=False):
        """Format the feed as Atom 1.0 and save the result to a file."""
        string = self.format_atom_string(validate, pretty)
        fp = open(filename, "w")
        fp.write(string)
        fp.close()
class InvalidFeedException(Exception):
    """Raised when a feed is missing elements required by the target format."""
### FACTORY FUNCTIONS ------------------------------
def fromUFP(ufp):
    """Build a Feed from a Universal-Feed-Parser style dictionary.

    NOTE(review): assumes ufp carries "feed" and "items" keys as produced
    by UFP -- confirm against callers.
    """
    channel_info = ufp["feed"]
    entries = ufp["items"]
    return Feed(channel_info, entries)
### MAIN ------------------------------
def main():
    """Build a small demonstration feed and print it in all three formats."""
    feed = Feed()
    feed.feed["title"] = "Test Feed"
    feed.feed["link"] = "http://code.google.com/p/feedformatter/"
    feed.feed["author"] = "Luke Maurits"
    feed.feed["description"] = "A simple test feed for the feedformatter project"
    item = {}
    item["title"] = "Test item"
    item["link"] = "http://www.python.org"
    item["description"] = "Python programming language"
    item["guid"] = "1234567890"
    feed.items.append(item)
    # BUG FIX: the format calls below used Python 2-only 'print x' statement
    # syntax (inconsistently mixed with print(...) calls in the same body);
    # print(single_argument) behaves identically on Python 2 and 3.
    print("---- RSS 1.0 ----")
    print(feed.format_rss1_string(pretty=True))
    print("---- RSS 2.0 ----")
    print(feed.format_rss2_string(pretty=True))
    print("---- Atom 1.0 ----")
    print(feed.format_atom_string(pretty=True))
# Emit the demonstration feeds when run as a script.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
# Packaging script for the feedformatter module.
from setuptools import setup
from feedformatter import __version__ as version
setup(
    name='feedformatter',
    version=version,
    description='A Python library for generating news feeds in RSS and Atom formats',
    author='Luke Maurits',
    author_email='luke@maurits.id.au',
    # BUG FIX: the URL used to point at the unrelated 'feedparser' project.
    url='http://code.google.com/p/feedformatter/',
    license='http://www.luke.maurits.id.au/software/bsdlicense.txt',
    py_modules=['feedformatter'],
)
| Python |
#!/usr/bin/env python
# Packaging script for the feedformatter module.
from setuptools import setup
from feedformatter import __version__ as version
setup(
    name='feedformatter',
    version=version,
    description='A Python library for generating news feeds in RSS and Atom formats',
    author='Luke Maurits',
    author_email='luke@maurits.id.au',
    # BUG FIX: the URL used to point at the unrelated 'feedparser' project.
    url='http://code.google.com/p/feedformatter/',
    license='http://www.luke.maurits.id.au/software/bsdlicense.txt',
    py_modules=['feedformatter'],
)
| Python |
'''module to create atom and rss feeds'''
# Feedformatter
# Copyright (c) 2008, Luke Maurits <luke@maurits.id.au>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys

# True on Python 3; selects the right StringIO implementation and string
# base type below.
PY3 = sys.version_info[0] == 3

__version__ = "0.5"
__author__ = "Luke Maurits, Michael Stella, Mariano Guerra"
__copyright__ = "Copyright 2008 Luke Maurits"

if PY3:
    from io import StringIO
    # Re-create the Python 2 name so isinstance checks elsewhere work
    # unchanged on Python 3.
    basestring = str
else:
    if sys.version_info[1] < 6:
        # Python < 2.6 has no 'bytes' builtin; alias it to str.
        bytes = str
    try:
        # cStringIO is faster but not always available.
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
# This "staircase" of import attempts is ugly. If there's a nicer way to do
# this, please let me know!
try:
    import xml.etree.cElementTree as ET
except ImportError:
    try:
        import xml.etree.ElementTree as ET
    except ImportError:
        try:
            import cElementTree as ET
        except ImportError:
            try:
                from elementtree import ElementTree as ET
            except ImportError:
                raise ImportError("Could not import any form of element tree!")
try:
    # Optional PyXML support for pretty printing.
    from xml.dom.ext import PrettyPrint
    from xml.dom.ext.reader.Sax import FromXml
    CAN_PRETTY_PRINT = True
except ImportError:
    CAN_PRETTY_PRINT = False
import time
import datetime
def _get_tz_offset():
"""
Return the current timezone's offset from GMT as a string
in the format +/-HH:MM, as required by RFC3339.
"""
seconds = -1 * time.timezone # Python gets the offset backward! >:(
minutes = seconds / 60
hours = minutes / 60
minutes = minutes - hours * 60
hours = abs(hours)
if seconds < 0:
return "-%02d:%02d" % (hours, minutes)
else:
return "+%02d:%02d" % (hours, minutes)
def _convert_datetime(dtime):
"""
Convert dtime, which may be one of a whole lot of things, into a
standard 9 part time tuple.
"""
if type(dtime) is datetime.datetime:
return dtime.timetuple()
elif ((type(dtime) is tuple and len(dtime) == 9) or
type(dtime) is time.struct_time):
# Already done!
return dtime
elif type(dtime) is int or type(dtime) is float:
# Assume this is a seconds-since-epoch time
return time.localtime(dtime)
elif isinstance(dtime, basestring):
# A time stamp?
try:
return time.strptime(dtime, "%a, %d %b %Y %H:%M:%S %Z")
except ValueError:
# Maybe this is a string of an epoch time?
try:
return time.localtime(float(dtime))
except ValueError:
# Guess not.
raise Exception("Unrecongised time format!")
else:
# No idea what this is. Give up!
raise Exception("Unrecongised time format!")
def _format_datetime(feed_type, dtime):
    """
    Convert some representation of a date and time into a string which can be
    used in a validly formatted feed of type feed_type ("rss2" or "atom").
    Raise an Exception if this cannot be done.
    """
    # First, convert time into a time structure
    dtime = _convert_datetime(dtime)
    # Then, convert that to the appropriate string.  BUG FIX: compared with
    # '==' instead of 'is' -- identity of equal string literals is a CPython
    # interning accident, not a language guarantee.
    if feed_type == "rss2":
        return time.strftime("%a, %d %b %Y %H:%M:%S %Z", dtime)
    elif feed_type == "atom":
        return time.strftime("%Y-%m-%dT%H:%M:%S", dtime) + _get_tz_offset()
def _atomise_id(tag):
"""return a tag in a suitable format for atom"""
if type(tag) is dict:
return tag['href'].replace('http://', 'tag:')
return tag.replace('http://', 'tag:')
def _atomise_link(link, rel=None):
"""return a link in a suitable format for atom"""
if type(link) is dict:
if 'type' not in link:
link['type'] = 'text/html'
if rel and 'rel' not in link:
link['rel'] = rel
return link
else:
result = {'href' : link, 'type': 'text/html'}
if rel:
result['rel'] = rel
return result
def _atomise_author(author):
"""
Convert author from whatever it is to a dictionary representing an
atom:Person construct.
"""
if type(author) is dict:
return author
else:
if author.startswith("http://") or author.startswith("www"):
# This is clearly a URI
return {"uri" : author}
elif "@" in author and "." in author:
# This is most probably an email address
return {"email" : author}
else:
# Must be a name
return {"name" : author}
def _rssify_author(author):
"""
Convert author from whatever it is to a plain old email string for
use in an RSS 2.0 feed.
"""
if type(author) is dict:
return author.get("email", None)
else:
if "@" in author and "." in author:
# Probably an email address
return author
else:
return None
def _rssify_link(link):
"""return a link in a suitable format"""
if type(link) is dict:
return link['href']
else:
return link
def _format_content(content):
"""Converts the ATOM 'content' node into a dict,
which will allow one to pass in a dict which has
an optionaly 'type' argument
"""
if type(content) is dict:
if not 'type' in content:
content['type'] = 'text'
return content
else:
return {
'type': 'html',
'content': content,
}
def _add_subelems(root_element, mappings, dictionary):
    """
    Add one subelement to root_element for each key in dictionary which is
    supported by a mapping in mappings.  Only the first matching key of a
    mapping is used; an optional third mapping entry converts the value.
    """
    for mapping in mappings:
        accepted_keys, elem_name = mapping[0], mapping[1]
        for key in accepted_keys:
            if key in dictionary:
                raw = dictionary[key]
                if len(mapping) == 3:
                    raw = mapping[2](raw)
                _add_subelem(root_element, elem_name, raw)
                break
def _add_subelem(root_element, name, value):
"""ad a subelement to *root_element*"""
if value is None:
return
if type(value) is dict:
### HORRIBLE HACK!
if name == "link":
ET.SubElement(root_element, name, value)
elif name == 'content':
# A wee hack too, the content node must be
# converted to a CDATA block. This is a sort of cheat, see:
# http://stackoverflow.com
# /questions/174890/how-to-output-cdata-using-elementtree
element = ET.Element(name, type= value['type'])
element.append(cdata(value['content']))
root_element.append(element)
else:
sub_elem = ET.SubElement(root_element, name)
for key in value:
_add_subelem(sub_elem, key, value[key])
else:
ET.SubElement(root_element, name).text = value
def _stringify(tree, pretty):
    """
    Turn an ElementTree into a string, optionally with line breaks and
    indentation (pretty printing is available only when PyXML is installed).
    """
    if not (pretty and CAN_PRETTY_PRINT):
        return _element_to_string(tree)
    out = StringIO()
    PrettyPrint(FromXml(_element_to_string(tree)), out, indent=" ")
    return out.getvalue()
def _element_to_string(element, encoding=None):
    """
    This replaces ElementTree's tostring() function
    with one that will use our local ElementTreeCDATA
    class instead
    """
    class _Sink(object):
        """File-like object exposing only the write() used by ET."""
        def __init__(self, write_function):
            self.write = write_function

    if encoding is None:
        encoding = 'utf-8'
    pieces = []
    ElementTreeCDATA(element).write(_Sink(pieces.append), encoding)
    # Normalise: drop Nones and decode byte chunks so join() always gets str.
    decoded = []
    for piece in pieces:
        if piece is None:
            decoded.append("")
        elif isinstance(piece, bytes):
            decoded.append(piece.decode(encoding))
        else:
            decoded.append(piece)
    return ''.join(decoded)
class Feed(object):
"""class that represents a feed object"""
def __init__(self, feed=None, items=None):
if feed:
self.feed = feed
else:
self.feed = {}
if items:
self.items = items
else:
self.items = []
self.entries = self.items
### RSS 1.0 STUFF ------------------------------
def validate_rss1(self):
"""Raise an InvalidFeedException if the feed cannot be validly
formatted as RSS 1.0."""
# <channel> must contain "title"
if "title" not in self.feed:
raise InvalidFeedException("The channel element of an "
"RSS 1.0 feed must contain a title subelement")
# <channel> must contain "link"
if "link" not in self.feed:
raise InvalidFeedException("The channel element of an "
" RSS 1.0 feeds must contain a link subelement")
# <channel> must contain "description"
if "description" not in self.feed:
raise InvalidFeedException("The channel element of an "
"RSS 1.0 feeds must contain a description subelement")
# Each <item> must contain "title" and "link"
for item in self.items:
if "title" not in item:
raise InvalidFeedException("Each item element in an RSS 1.0 "
"feed must contain a title subelement")
if "link" not in item:
raise InvalidFeedException("Each item element in an RSS 1.0 "
"feed must contain a link subelement")
def format_rss1_string(self, validate=True, pretty=False):
"""Format the feed as RSS 1.0 and return the result as a string."""
if validate:
self.validate_rss1()
rss1_root = ET.Element('rdf:RDF',
{"xmlns:rdf" : "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"xmlns" : "http://purl.org/rss/1.0/"})
rss1_channel = ET.SubElement(rss1_root, 'channel',
{"rdf:about" : self.feed["link"]})
_add_subelems(rss1_channel, _rss1_channel_mappings, self.feed)
rss1_contents = ET.SubElement(rss1_channel, 'items')
rss1_contents_seq = ET.SubElement (rss1_contents, 'rdf:Seq')
for item in self.items:
ET.SubElement(rss1_contents_seq, 'rdf:li', resource=item["link"])
for item in self.items:
rss1_item = ET.SubElement(rss1_root, 'item',
{"rdf:about" : item["link"]})
_add_subelems(rss1_item, _rss1_item_mappings, item)
return _stringify(rss1_root, pretty=pretty)
def format_rss1_file(self, filename, validate=True, pretty=False):
"""Format the feed as RSS 1.0 and save the result to a file."""
string = self.format_rss1_string(validate, pretty)
handle = open(filename, "w")
handle.write(string)
handle.close()
### RSS 2.0 STUFF ------------------------------
def validate_rss2(self):
"""Raise an InvalidFeedException if the feed cannot be validly
formatted as RSS 2.0."""
# <channel> must contain "title"
if "title" not in self.feed:
raise InvalidFeedException("The channel element of an "
"RSS 2.0 feed must contain a title subelement")
# <channel> must contain "link"
if "link" not in self.feed:
raise InvalidFeedException("The channel element of an "
" RSS 2.0 feeds must contain a link subelement")
# <channel> must contain "description"
if "description" not in self.feed:
raise InvalidFeedException("The channel element of an "
"RSS 2.0 feeds must contain a description subelement")
# Each <item> must contain at least "title" OR "description"
for item in self.items:
if not ("title" in item or "description" in item):
raise InvalidFeedException("Each item element in an RSS 2.0 "
"feed must contain at least a title or description"
" subelement")
def format_rss2_string(self, validate=True, pretty=False):
"""Format the feed as RSS 2.0 and return the result as a string."""
if validate:
self.validate_rss2()
rss2_root = ET.Element('rss', {'version':'2.0'})
rss2_channel = ET.SubElement(rss2_root, 'channel')
_add_subelems(rss2_channel, _rss2_channel_mappings, self.feed)
for item in self.items:
rss2_item = ET.SubElement(rss2_channel, 'item')
_add_subelems(rss2_item, _rss2_item_mappings, item)
return ('<?xml version="1.0" encoding="UTF-8" ?>\n' +
_stringify(rss2_root, pretty=pretty))
def format_rss2_file(self, filename, validate=True, pretty=False):
"""Format the feed as RSS 2.0 and save the result to a file."""
string = self.format_rss2_string(validate, pretty)
handle = open(filename, "w")
handle.write(string)
handle.close()
### ATOM STUFF ------------------------------
def validate_atom(self):
"""Raise an InvalidFeedException if the feed cannot be validly
formatted as Atom 1.0."""
# Must have at least one "author" element in "feed" OR at least
# "author" element in each "entry".
if "author" not in self.feed:
for entry in self.entries:
if "author" not in entry:
raise InvalidFeedException("Atom feeds must have either at "
"least one author element in the feed element or at "
" least one author element in each entry element")
def format_atom_string(self, validate=True, pretty=False):
"""Format the feed as Atom 1.0 and return the result as a string."""
if validate:
self.validate_atom()
atom_root = ET.Element('feed', {"xmlns":"http://www.w3.org/2005/Atom"})
_add_subelems(atom_root, _atom_feed_mappings, self.feed)
for entry in self.entries:
atom_item = ET.SubElement( atom_root, 'entry')
_add_subelems(atom_item, _atom_item_mappings, entry)
return ('<?xml version="1.0" encoding="UTF-8" ?>\n' +
_stringify(atom_root, pretty=pretty))
def format_atom_file(self, filename, validate=True, pretty=False):
"""Format the feed as Atom 1.0 and save the result to a file."""
string = self.format_atom_string(validate, pretty)
handle = open(filename, "w")
handle.write(string)
handle.close()
class InvalidFeedException(Exception):
    """Raised when a feed is missing data required by the target format.

    Thrown by the validate_*() methods, and therefore by the format_*()
    methods when they are called with validate=True.
    """
    pass
def cdata(text=None):
    """Create and return an element representing a CDATA block.

    The element uses the magic tag "CDATA", which ElementTreeCDATA
    recognises and serialises as a literal <![CDATA[...]]> section.
    """
    element = ET.Element("CDATA")
    element.text = "" if text is None else text
    return element
class ElementTreeCDATA(ET.ElementTree):
    """
    Subclass of ElementTree which handles CDATA blocks reasonably.

    NOTE(review): this overrides ET.ElementTree._write, a private hook of
    old ElementTree releases; newer ElementTree versions no longer call
    _write, in which case this override is silently ignored -- confirm
    the ElementTree version this is meant to run against.
    """
    def _write(self, file_like, node, encoding, namespaces):
        """write this element representation to *file_like*"""
        if node.tag == "CDATA":
            # Emit the payload wrapped in a CDATA section instead of
            # letting the serialiser escape it as ordinary text.
            text = node.text.encode(encoding)
            file_like.write("\n<![CDATA[%s]]>\n" % text)
        else:
            # Any other tag: defer to the stock serialiser.
            ET.ElementTree._write(self, file_like, node, encoding, namespaces)
# RSS 1.0 Functions ----------
# Each mapping row is (accepted input keys, output element name[, formatter]).
# _add_subelems takes the first listed key present in the source dict and
# writes its value to the named subelement, passing it through the optional
# formatter callable first.
_rss1_channel_mappings = (
    (("title",), "title"),
    (("link", "url"), "link"),
    (("description", "desc", "summary"), "description")
)
_rss1_item_mappings = (
    (("title",), "title"),
    (("link", "url"), "link"),
    (("description", "desc", "summary"), "description")
)
# RSS 2.0 Functions ----------
_rss2_channel_mappings = (
    (("title",), "title"),
    (("link", "url"), "link", _rssify_link),
    (("description", "desc", "summary"), "description"),
    # Any of several date keys is accepted and normalised to RSS 2.0 form.
    (("pubDate", "pubdate", "date", "published", "updated"), "pubDate",
        lambda x: _format_datetime("rss2",x)),
    (("category",), "category"),
    (("language",), "language"),
    (("copyright",), "copyright"),
    (("webMaster",), "webmaster"),
    (("image",), "image"),
    (("skipHours",), "skipHours"),
    (("skipDays",), "skipDays")
)
_rss2_item_mappings = (
    (("title",), "title"),
    (("link", "url"), "link", _rssify_link),
    (("description", "desc", "summary"), "description"),
    (("guid", "id"), "guid"),
    (("pubDate", "pubdate", "date", "published", "updated"), "pubDate",
        lambda x: _format_datetime("rss2",x)),
    (("category",), "category"),
    (("author",), "author", _rssify_author)
)
# Atom 1.0 ----------
_atom_feed_mappings = (
    (("title",), "title"),
    (("id", "link", "url"), "id", _atomise_id),
    (("link", "url"), "link", _atomise_link),
    (("description", "desc", "summary"), "subtitle"),
    (("pubDate", "pubdate", "date", "published", "updated"), "updated",
        lambda x: _format_datetime("atom",x)),
    (("category",), "category"),
    (("author",), "author", _atomise_author)
)
_atom_item_mappings = (
    (("title",), "title"),
    # An item link becomes an Atom link element with rel='alternate'.
    (("link", "url"), "link", lambda x: _atomise_link(x, rel='alternate')),
    (("id", "link", "url"), "id", _atomise_id),
    (("description", "desc", "summary"), "summary"),
    (("content",), "content", _format_content),
    (("pubDate", "pubdate", "date", "published", "updated"), "published",
        lambda x: _format_datetime("atom",x)),
    (("updated",), "updated", lambda x: _format_datetime("atom",x)),
    (("category",), "category"),
    (("author",), "author", _atomise_author)
)
### FACTORY FUNCTIONS ------------------------------
def from_ufp(ufp):
    """Build and return a Feed from a universal-feedparser-style dict.

    NOTE(review): assumes *ufp* exposes "feed" and "items" keys -- confirm
    against the producer of these dicts.
    """
    feed_data = ufp["feed"]
    item_list = ufp["items"]
    return Feed(feed_data, item_list)
### MAIN ------------------------------
def main():
    """Command-line demo: build a one-item feed and print it rendered in
    every supported format (RSS 1.0, RSS 2.0, Atom 1.0)."""
    def show(*parts):
        """Print-like helper that behaves the same across Python versions."""
        sys.stdout.write(" ".join(str(part) for part in parts) + "\n")
    demo = Feed()
    demo.feed["title"] = "Test Feed"
    demo.feed["link"] = "http://code.google.com/p/feedformatter/"
    demo.feed["author"] = "Luke Maurits"
    demo.feed["description"] = "A simple test feed for feedformatter"
    entry = {}
    entry["title"] = "Test item"
    entry["link"] = "http://www.python.org"
    entry["description"] = "Python programming language"
    entry["guid"] = "1234567890"
    demo.items.append(entry)
    for label, render in (("RSS 1.0", demo.format_rss1_string),
                          ("RSS 2.0", demo.format_rss2_string),
                          ("Atom 1.0", demo.format_atom_string)):
        show("---- %s ----" % label)
        show(render(pretty=True))
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
import urllib
from django.utils import simplejson
class HatenaBookmarkFlare(webapp.RequestHandler):
    """FeedBurner FeedFlare endpoint for the Hatena Bookmark service.

    GET without a "url" parameter serves the FeedFlareUnit catalog XML;
    GET with "url" serves the per-item FeedFlare XML with the bookmark
    count for that URL.
    """
    def get(self):
        """Dispatch on the presence of the "url" query parameter."""
        url = self.request.get('url', default_value='')
        if url == '':
            self.get_flare_unit()
        else:
            self.get_flare_item(url)
    def get_flare_unit(self):
        """Write the static FeedFlareUnit catalog XML for this service."""
        path_url = self.request.path_url
        self.response.headers['Content-Type'] = 'text/plain'
        # NOTE(review): the SampleFlare link uses "{$link}" while the
        # DynamicFlare uses "${link}" -- looks inconsistent; confirm the
        # intended FeedFlare template variable syntax.
        self.response.out.write("""<FeedFlareUnit>
<Catalog>
<Title>Save to hatena bookmark</Title>
<Description>Save this item to the hatena bookmarking service.</Description>
</Catalog>
<DynamicFlare href="%s?url=${link}"/>
<SampleFlare>
<Text>Save to hatena bookmark (23 saves)</Text>
<Link href="http://b.hatena.ne.jp/entry/{$link}" />
</SampleFlare>
</FeedFlareUnit>
""" % path_url)
    def get_flare_item(self, url):
        """Write the FeedFlare XML for *url*, including its bookmark count.

        Falls back to a count of 0 if the Hatena JSON API cannot be
        reached or returns unexpected data.
        """
        count = 0
        json_url = 'http://b.hatena.ne.jp/entry/json/%s' % url
        try:
            result = urlfetch.fetch(json_url)
            if result.status_code == 200:
                # The API response is wrapped in one leading and one
                # trailing character (JSONP-style); strip them first.
                content = result.content[1:-1]
                if content != 'null':
                    json_obj = simplejson.loads(content)
                    count = int(json_obj['count'])
        except Exception:
            # Best-effort: any fetch/parse failure leaves count at 0.
            # (Was a bare "except:", which would also have swallowed
            # SystemExit and KeyboardInterrupt.)
            pass
        text = 'Save to hatena bookmark'
        if count == 1:
            text += ' (%d save)' % count
        elif count > 1:
            text += ' (%d saves)' % count
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write("""<FeedFlare>
<Text>%s</Text>
<Link href="http://b.hatena.ne.jp/entry/%s" />
</FeedFlare>
""" % (text, url))
class HatenaBookmarkFlareCompat(HatenaBookmarkFlare):
    """Backwards-compatible route that takes the target URL from the path
    (/hatena/<url>) rather than from the query string."""
    def get(self, url):
        # The URL arrives percent-encoded in the path; decode before use.
        url = urllib.unquote(url)
        self.get_flare_item(url)
def main():
    """Wire up the request routes and hand the WSGI app to the runner."""
    routes = [
        ('/hatena', HatenaBookmarkFlare),
        ('/hatena/(.+)', HatenaBookmarkFlareCompat),
    ]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=False))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
import urllib
from django.utils import simplejson
class LivedoorClipFlare(webapp.RequestHandler):
    """FeedBurner FeedFlare endpoint for the "livedoor clip" service.

    GET without a "url" parameter serves the FeedFlareUnit catalog XML;
    GET with "url" serves the per-item FeedFlare XML with the clip count
    for that URL.
    """
    def get(self):
        """Dispatch on the presence of the "url" query parameter."""
        url = self.request.get('url', default_value='')
        if url == '':
            self.get_flare_unit()
        else:
            self.get_flare_item(url)
    def get_flare_unit(self):
        """Write the static FeedFlareUnit catalog XML for this service."""
        path_url = self.request.path_url
        self.response.headers['Content-Type'] = 'text/plain'
        # NOTE(review): the SampleFlare link uses "{$link}" while the
        # DynamicFlare uses "${link}" -- looks inconsistent; confirm the
        # intended FeedFlare template variable syntax.
        self.response.out.write("""<FeedFlareUnit>
<Catalog>
<Title>Save to livedoor clip</Title>
<Description>Save this item to the "livedoor clip" bookmarking service.</Description>
</Catalog>
<DynamicFlare href="%s?url=${link}"/>
<SampleFlare>
<Text>Save to livedoor clip (23 saves)</Text>
<Link href="http://clip.livedoor.com/redirect?link={$link}" />
</SampleFlare>
</FeedFlareUnit>
""" % path_url)
    def get_flare_item(self, url):
        """Write the FeedFlare XML for *url*, including its clip count.

        Falls back to a count of 0 if the livedoor JSON API cannot be
        reached or returns unexpected data.
        """
        count = 0
        json_url = 'http://api.clip.livedoor.com/json/comments?link=%s' % url
        try:
            result = urlfetch.fetch(json_url)
            if result.status_code == 200:
                content = result.content
                json_obj = simplejson.loads(content)
                if json_obj['isSuccess'] == 1:
                    count = int(json_obj['total_clip_count'])
        except Exception:
            # Best-effort: any fetch/parse failure leaves count at 0.
            # (Was a bare "except:", which would also have swallowed
            # SystemExit and KeyboardInterrupt.)
            pass
        text = 'Save to livedoor clip'
        if count == 1:
            text += ' (%d save)' % count
        elif count > 1:
            text += ' (%d saves)' % count
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write("""<FeedFlare>
<Text>%s</Text>
<Link href="http://clip.livedoor.com/redirect?link=%s" />
</FeedFlare>
""" % (text, urllib.quote(url)))
class LivedoorClipFlareCompat(LivedoorClipFlare):
    """Backwards-compatible route that takes the target URL from the path
    (/livedoor/<url>) rather than from the query string."""
    def get(self, url):
        # The URL arrives percent-encoded in the path; decode before use.
        url = urllib.unquote(url)
        self.get_flare_item(url)
def main():
    """Wire up the request routes and hand the WSGI app to the runner."""
    routes = [
        ('/livedoor', LivedoorClipFlare),
        ('/livedoor/(.+)', LivedoorClipFlareCompat),
    ]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=False))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class RootHandler(webapp.RequestHandler):
    """Index page listing the project links and the available FeedFlares."""
    def get(self):
        # path_url is interpolated twice to build absolute links to the
        # /hatena and /livedoor flare endpoints.
        path_url = self.request.path_url
        self.response.out.write("""
<html>
<body>
<h1>FeedFlare-AppEngine</h1>
<dl>
<dt>code</dt><dd><a href="http://code.google.com/p/feedflare-appengine/">feedflare-appengine - Google Code</a></dd>
<dt>maintainer</dt><dd><a href="http://blog.as-is.net/">Hirotaka Ogawa</a></dd>
</dl>
<h2>FeedFlare Catalog</h2>
<ul>
<li><a href="%shatena">Hatena Bookmark</a></li>
<li><a href="%slivedoor">Livedoor Clip</a></li>
</ul>
</body>
</html>
""" % (path_url, path_url))
def main():
    """Wire up the root route and hand the WSGI app to the runner."""
    routes = [('/', RootHandler)]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=False))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
import urllib
from django.utils import simplejson
class HatenaBookmarkFlare(webapp.RequestHandler):
    """FeedBurner FeedFlare endpoint for the Hatena Bookmark service.

    GET without a "url" parameter serves the FeedFlareUnit catalog XML;
    GET with "url" serves the per-item FeedFlare XML with the bookmark
    count for that URL.
    """
    def get(self):
        """Dispatch on the presence of the "url" query parameter."""
        url = self.request.get('url', default_value='')
        if url == '':
            self.get_flare_unit()
        else:
            self.get_flare_item(url)
    def get_flare_unit(self):
        """Write the static FeedFlareUnit catalog XML for this service."""
        path_url = self.request.path_url
        self.response.headers['Content-Type'] = 'text/plain'
        # NOTE(review): the SampleFlare link uses "{$link}" while the
        # DynamicFlare uses "${link}" -- looks inconsistent; confirm the
        # intended FeedFlare template variable syntax.
        self.response.out.write("""<FeedFlareUnit>
<Catalog>
<Title>Save to hatena bookmark</Title>
<Description>Save this item to the hatena bookmarking service.</Description>
</Catalog>
<DynamicFlare href="%s?url=${link}"/>
<SampleFlare>
<Text>Save to hatena bookmark (23 saves)</Text>
<Link href="http://b.hatena.ne.jp/entry/{$link}" />
</SampleFlare>
</FeedFlareUnit>
""" % path_url)
    def get_flare_item(self, url):
        """Write the FeedFlare XML for *url*, including its bookmark count.

        Falls back to a count of 0 if the Hatena JSON API cannot be
        reached or returns unexpected data.
        """
        count = 0
        json_url = 'http://b.hatena.ne.jp/entry/json/%s' % url
        try:
            result = urlfetch.fetch(json_url)
            if result.status_code == 200:
                # The API response is wrapped in one leading and one
                # trailing character (JSONP-style); strip them first.
                content = result.content[1:-1]
                if content != 'null':
                    json_obj = simplejson.loads(content)
                    count = int(json_obj['count'])
        except Exception:
            # Best-effort: any fetch/parse failure leaves count at 0.
            # (Was a bare "except:", which would also have swallowed
            # SystemExit and KeyboardInterrupt.)
            pass
        text = 'Save to hatena bookmark'
        if count == 1:
            text += ' (%d save)' % count
        elif count > 1:
            text += ' (%d saves)' % count
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write("""<FeedFlare>
<Text>%s</Text>
<Link href="http://b.hatena.ne.jp/entry/%s" />
</FeedFlare>
""" % (text, url))
class HatenaBookmarkFlareCompat(HatenaBookmarkFlare):
    """Backwards-compatible route that takes the target URL from the path
    (/hatena/<url>) rather than from the query string."""
    def get(self, url):
        # The URL arrives percent-encoded in the path; decode before use.
        url = urllib.unquote(url)
        self.get_flare_item(url)
def main():
    """Wire up the request routes and hand the WSGI app to the runner."""
    routes = [
        ('/hatena', HatenaBookmarkFlare),
        ('/hatena/(.+)', HatenaBookmarkFlareCompat),
    ]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=False))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
import urllib
from django.utils import simplejson
class LivedoorClipFlare(webapp.RequestHandler):
    """FeedBurner FeedFlare endpoint for the "livedoor clip" service.

    GET without a "url" parameter serves the FeedFlareUnit catalog XML;
    GET with "url" serves the per-item FeedFlare XML with the clip count
    for that URL.
    """
    def get(self):
        """Dispatch on the presence of the "url" query parameter."""
        url = self.request.get('url', default_value='')
        if url == '':
            self.get_flare_unit()
        else:
            self.get_flare_item(url)
    def get_flare_unit(self):
        """Write the static FeedFlareUnit catalog XML for this service."""
        path_url = self.request.path_url
        self.response.headers['Content-Type'] = 'text/plain'
        # NOTE(review): the SampleFlare link uses "{$link}" while the
        # DynamicFlare uses "${link}" -- looks inconsistent; confirm the
        # intended FeedFlare template variable syntax.
        self.response.out.write("""<FeedFlareUnit>
<Catalog>
<Title>Save to livedoor clip</Title>
<Description>Save this item to the "livedoor clip" bookmarking service.</Description>
</Catalog>
<DynamicFlare href="%s?url=${link}"/>
<SampleFlare>
<Text>Save to livedoor clip (23 saves)</Text>
<Link href="http://clip.livedoor.com/redirect?link={$link}" />
</SampleFlare>
</FeedFlareUnit>
""" % path_url)
    def get_flare_item(self, url):
        """Write the FeedFlare XML for *url*, including its clip count.

        Falls back to a count of 0 if the livedoor JSON API cannot be
        reached or returns unexpected data.
        """
        count = 0
        json_url = 'http://api.clip.livedoor.com/json/comments?link=%s' % url
        try:
            result = urlfetch.fetch(json_url)
            if result.status_code == 200:
                content = result.content
                json_obj = simplejson.loads(content)
                if json_obj['isSuccess'] == 1:
                    count = int(json_obj['total_clip_count'])
        except Exception:
            # Best-effort: any fetch/parse failure leaves count at 0.
            # (Was a bare "except:", which would also have swallowed
            # SystemExit and KeyboardInterrupt.)
            pass
        text = 'Save to livedoor clip'
        if count == 1:
            text += ' (%d save)' % count
        elif count > 1:
            text += ' (%d saves)' % count
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write("""<FeedFlare>
<Text>%s</Text>
<Link href="http://clip.livedoor.com/redirect?link=%s" />
</FeedFlare>
""" % (text, urllib.quote(url)))
class LivedoorClipFlareCompat(LivedoorClipFlare):
    """Backwards-compatible route that takes the target URL from the path
    (/livedoor/<url>) rather than from the query string."""
    def get(self, url):
        # The URL arrives percent-encoded in the path; decode before use.
        url = urllib.unquote(url)
        self.get_flare_item(url)
def main():
    """Wire up the request routes and hand the WSGI app to the runner."""
    routes = [
        ('/livedoor', LivedoorClipFlare),
        ('/livedoor/(.+)', LivedoorClipFlareCompat),
    ]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=False))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
class RootHandler(webapp.RequestHandler):
    """Index page listing the project links and the available FeedFlares."""
    def get(self):
        # path_url is interpolated twice to build absolute links to the
        # /hatena and /livedoor flare endpoints.
        path_url = self.request.path_url
        self.response.out.write("""
<html>
<body>
<h1>FeedFlare-AppEngine</h1>
<dl>
<dt>code</dt><dd><a href="http://code.google.com/p/feedflare-appengine/">feedflare-appengine - Google Code</a></dd>
<dt>maintainer</dt><dd><a href="http://blog.as-is.net/">Hirotaka Ogawa</a></dd>
</dl>
<h2>FeedFlare Catalog</h2>
<ul>
<li><a href="%shatena">Hatena Bookmark</a></li>
<li><a href="%slivedoor">Livedoor Clip</a></li>
</ul>
</body>
</html>
""" % (path_url, path_url))
def main():
    """Wire up the root route and hand the WSGI app to the runner."""
    routes = [('/', RootHandler)]
    run_wsgi_app(webapp.WSGIApplication(routes, debug=False))
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# feedmaker.py
#
# The MIT License
#
# Copyright (c) 2011 Clifton Chandler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import xml.etree.ElementTree
import os
import sys
import glob
import time
import urllib
import operator
import argparse
import ConfigParser
# read a configuration file
def ReadConfig():
global config
config = ConfigParser.ConfigParser()
# default streamripper options
config.add_section("streamripper")
config.set("streamripper", "stream_url", "http://yp.shoutcast.com/sbin/tunein-station.pls?id=1704166")
config.set("streamripper", "time_to_rip", "7200")
config.set("streamripper", "user_agent", "WinampMPEG/5.0")
config.set("streamripper", "options", "")
config.set("streamripper", "bin_location", "streamripper")
# default media folder settings
config.add_section("media")
config.set("media","location","/path/to/media/files")
config.set("media","max_folder_size","536870912")
config.set("media","glob","*.mp3")
config.set("media","mime_type","audio/mpeg")
# default podcast xml options
config.add_section("podcast")
config.set("podcast","out_file","/path/to/podcast/file")
config.set("podcast","title","Podcast Title")
config.set("podcast","url_base","http://server/path/")
try:
conf_file_path = os.path.join( sys.path[0], args.conf_file )
if args.write_conf_file == True:
with open(args.conf_file_path, 'wb') as configfile:
config.write(configfile)
if ( os.path.exists( conf_file_path ) == True ):
Log( LogLevel.INFO, "Reading config file " + conf_file_path + " ..." )
config.read( conf_file_path )
else:
Log( LogLevel.ERROR, "Config file does not exist: " + args.conf_file )
# Check media location
path = config.get("media", "location")
if os.path.exists( path ) == False:
raise RuntimeError("Media location does not exist: " + path)
# Check podcast output file
path = config.get("podcast","out_file")
path = os.path.dirname( path )
if os.path.exists( path ) == False:
raise RuntimeError("Podcast output path does not exist: " + path)
except ConfigParser.Error, err:
Log( LogLevel.ERROR, "Config file parser error: " + str(err) )
exit(1)
except RuntimeError, err:
Log( LogLevel.ERROR, "Config file error: " + str(err) )
exit(1)
except IOError, err:
Log( LogLevel.ERROR, "I/O Error: " + str(err) )
exit(1)
# Rip the stream
def RipStream():
    """Invoke streamripper via the shell, with options assembled from the
    global config and command-line args.

    NOTE(review): the command line is built by string concatenation and run
    through os.system, so config values containing shell metacharacters
    (spaces, quotes, ';') will break or be interpreted by the shell --
    consider subprocess.call with an argument list.
    """
    # Streamripper command line options
    cmd_line = config.get("streamripper","bin_location") + " " # add command to command line
    cmd_line += config.get("streamripper","stream_url") # stream to rip
    cmd_line += " -d " + config.get("media","location") # set dir for mp3 files
    cmd_line += " -s" # don't create a directory for each stream
    cmd_line += " -u " + config.get("streamripper","user_agent") # Set user agent
    cmd_line += " -l " + config.get("streamripper","time_to_rip") # amount of time to rip
    if args.verbose == False:
        cmd_line += " --quiet" # don't show any output
    cmd_line += " --xs-none" # don't do space detection
    cmd_line += " -q" # add a sequence number to the mp3 files
    cmd_line += " " + config.get("streamripper","options") # add the additional options configured in the config file
    Log( LogLevel.DEBUG, "Calling ripper with command: " + cmd_line )
    os.system(cmd_line)
# Generate the mp3 file list
def GenFileList():
    """Return [(path, size, ctime), ...] for the configured media files,
    sorted newest-first by ctime."""
    pattern = os.path.join( config.get("media","location"), config.get("media","glob") )
    entries = []
    for path in glob.glob(pattern):
        info = os.stat(path)
        entries.append((path, info.st_size, info.st_ctime))
    entries.sort(key=operator.itemgetter(2), reverse=True)
    return entries
# Delete files
# Delete files
def DeleteFiles(file_list):
    """Delete the oldest media files until the folder fits under the
    configured media/max_folder_size.

    *file_list* holds (path, size, ctime) tuples sorted newest-first
    (as produced by GenFileList), so popping from the end removes the
    oldest file. The list is mutated in place.
    """
    # Total size of everything currently tracked.
    size = 0
    for entry in file_list:
        size += entry[1]
    # (Removed the unused local "del_index" from the original.)
    # Hoist the loop-invariant size limit out of the loop.
    max_size = int(config.get("media","max_folder_size"))
    while size > max_size:
        file_to_delete = file_list.pop()
        Log( LogLevel.DEBUG, "Deleting file: " + file_to_delete[0] )
        os.remove( file_to_delete[0] )
        size -= file_to_delete[1]
# in-place prettyprint formatter (from http://effbot.org/zone/element-lib.htm#prettyprint)
# in-place prettyprint formatter (from http://effbot.org/zone/element-lib.htm#prettyprint)
def Indent(elem, level=0):
    """Add whitespace to *elem* and its descendants, in place, so the
    serialised XML is indented one space per nesting level."""
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:  # deliberately rebinds elem to each child in turn
            Indent(elem, level+1)
        # after the loop, elem is the LAST child; give it the closing indent
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
# Generate the XML podcast file
def GenXmlFile(file_list):
    """Write the podcast RSS 2.0 XML for *file_list* to the configured
    podcast/out_file.

    *file_list* holds (path, size, ctime) tuples as produced by
    GenFileList().
    """
    import xml.etree.ElementTree as ET
    # build rss feed structure
    rss = ET.Element("rss")
    rss.set("xmlns:itunes", "http://www.itunes.com/dtds/podcast-1.0.dtd")
    rss.set("version", "2.0")
    # channel element
    channel = ET.SubElement(rss, "channel")
    # Add title
    title = ET.SubElement(channel, "title")
    title.text = config.get("podcast","title")
    # Add description
    desc = ET.SubElement(channel, "description")
    desc.text = "Feedmaker Podcast"
    # Loop through the mp3 files
    for file in file_list:
        Log( LogLevel.DEBUG, "Adding: " + file[0] )
        # determine url of file (percent-encode the basename for the URL)
        url = config.get("podcast","url_base") + urllib.quote( os.path.basename(file[0]) )
        # Add an item; the file URL doubles as the guid
        item = ET.SubElement(channel, "item")
        item_title = ET.SubElement(item, "title")
        item_title.text = os.path.basename(file[0])
        item_enclosure = ET.SubElement(item, "enclosure")
        item_enclosure.set("url", url)
        item_enclosure.set("type", config.get("media","mime_type"))
        item_guid = ET.SubElement(item, "guid")
        item_guid.text = url
        item_pubdate = ET.SubElement(item, "pubDate")
        # NOTE(review): time.ctime is not the RFC-822-style date RSS 2.0
        # expects -- confirm podcast clients tolerate this format.
        item_pubdate.text = time.ctime(os.path.getctime(file[0]))
    # cleanup the tree
    Indent(rss)
    # wrap it in an ElementTree instance
    tree = ET.ElementTree(rss)
    # write to XML file
    tree.write(config.get("podcast","out_file"))
# Parse the command line options
def ParseArgs():
    """Parse the command-line options into the module-global *args*."""
    global args
    default_config_path = os.path.join( sys.path[0], "feedmaker.conf" )
    parser = argparse.ArgumentParser(description='Create podcast from radio stream.')
    # Boolean flags, registered in the original order so --help is unchanged.
    for flag, dest, text in (
            ('-x', 'xml_only', 'Generate the XML file only'),
            ('-v', 'verbose', 'Verbose'),
            ('-q', 'quiet', 'Quiet mode')):
        parser.add_argument(flag, action='store_true', default=False, dest=dest, help=text)
    parser.add_argument('-c', action='store', default=default_config_path, dest='conf_file', help='Config file')
    parser.add_argument('-w', action='store_true', default=False, dest='write_conf_file', help='Write the config file with defaults')
    args = parser.parse_args()
# Define loging level
# Define loging level
class LogLevel:
    """Symbolic console logging levels, in increasing verbosity."""
    ERROR=1
    INFO=2
    DEBUG=3
# Log to the console using different log levels
# Log to the console using different log levels
def Log(level,log_string):
    """Print *log_string* to stdout according to *level*.

    ERROR always prints (prefixed); INFO is suppressed by -q; DEBUG
    prints only with -v. Raises Exception on an unknown level.
    """
    if level==LogLevel.ERROR:
        print "ERROR - " + log_string
    elif level==LogLevel.INFO:
        if args.quiet == False:
            print log_string
    elif level==LogLevel.DEBUG:
        if args.verbose == True:
            print log_string
    else:
        raise Exception("Unknown logging level")
# main entry point
# main entry point
def main():
    """Top-level flow: parse args, load config, rip the stream, prune old
    files, then regenerate the podcast XML. Returns 0 on success."""
    # Parse the argument
    ParseArgs()
    # Read the config file
    ReadConfig()
    # Rip the stream (skipped with -x)
    if args.xml_only == False:
        Log( LogLevel.INFO, "Ripping stream ..." )
        RipStream()
    # Get the MP3 list
    Log( LogLevel.INFO, "Getting file list from folder " + config.get("media","location") + " ..." )
    flist = GenFileList()
    # Remove old files
    Log( LogLevel.INFO, "Deleting old files ..." )
    DeleteFiles(flist)
    # Generate the XML file
    Log( LogLevel.INFO, "Generating " + config.get("podcast","out_file") + " ..." )
    GenXmlFile(flist)
    Log( LogLevel.INFO, "Done." )
    return 0
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# feedmaker.py
#
# The MIT License
#
# Copyright (c) 2011 Clifton Chandler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import xml.etree.ElementTree
import os
import sys
import glob
import time
import urllib
import operator
import argparse
import ConfigParser
# read a configuration file
def ReadConfig():
global config
config = ConfigParser.ConfigParser()
# default streamripper options
config.add_section("streamripper")
config.set("streamripper", "stream_url", "http://yp.shoutcast.com/sbin/tunein-station.pls?id=1704166")
config.set("streamripper", "time_to_rip", "7200")
config.set("streamripper", "user_agent", "WinampMPEG/5.0")
config.set("streamripper", "options", "")
config.set("streamripper", "bin_location", "streamripper")
# default media folder settings
config.add_section("media")
config.set("media","location","/path/to/media/files")
config.set("media","max_folder_size","536870912")
config.set("media","glob","*.mp3")
config.set("media","mime_type","audio/mpeg")
# default podcast xml options
config.add_section("podcast")
config.set("podcast","out_file","/path/to/podcast/file")
config.set("podcast","title","Podcast Title")
config.set("podcast","url_base","http://server/path/")
try:
conf_file_path = os.path.join( sys.path[0], args.conf_file )
if args.write_conf_file == True:
with open(args.conf_file_path, 'wb') as configfile:
config.write(configfile)
if ( os.path.exists( conf_file_path ) == True ):
Log( LogLevel.INFO, "Reading config file " + conf_file_path + " ..." )
config.read( conf_file_path )
else:
Log( LogLevel.ERROR, "Config file does not exist: " + args.conf_file )
# Check media location
path = config.get("media", "location")
if os.path.exists( path ) == False:
raise RuntimeError("Media location does not exist: " + path)
# Check podcast output file
path = config.get("podcast","out_file")
path = os.path.dirname( path )
if os.path.exists( path ) == False:
raise RuntimeError("Podcast output path does not exist: " + path)
except ConfigParser.Error, err:
Log( LogLevel.ERROR, "Config file parser error: " + str(err) )
exit(1)
except RuntimeError, err:
Log( LogLevel.ERROR, "Config file error: " + str(err) )
exit(1)
except IOError, err:
Log( LogLevel.ERROR, "I/O Error: " + str(err) )
exit(1)
# Rip the stream
def RipStream():
    """Invoke streamripper via the shell, with options assembled from the
    global config and command-line args.

    NOTE(review): the command line is built by string concatenation and run
    through os.system, so config values containing shell metacharacters
    (spaces, quotes, ';') will break or be interpreted by the shell --
    consider subprocess.call with an argument list.
    """
    # Streamripper command line options
    cmd_line = config.get("streamripper","bin_location") + " " # add command to command line
    cmd_line += config.get("streamripper","stream_url") # stream to rip
    cmd_line += " -d " + config.get("media","location") # set dir for mp3 files
    cmd_line += " -s" # don't create a directory for each stream
    cmd_line += " -u " + config.get("streamripper","user_agent") # Set user agent
    cmd_line += " -l " + config.get("streamripper","time_to_rip") # amount of time to rip
    if args.verbose == False:
        cmd_line += " --quiet" # don't show any output
    cmd_line += " --xs-none" # don't do space detection
    cmd_line += " -q" # add a sequence number to the mp3 files
    cmd_line += " " + config.get("streamripper","options") # add the additional options configured in the config file
    Log( LogLevel.DEBUG, "Calling ripper with command: " + cmd_line )
    os.system(cmd_line)
# Generate the mp3 file list
def GenFileList():
    """Return [(path, size, ctime), ...] for the configured media files,
    sorted newest-first by ctime."""
    pattern = os.path.join( config.get("media","location"), config.get("media","glob") )
    entries = []
    for path in glob.glob(pattern):
        info = os.stat(path)
        entries.append((path, info.st_size, info.st_ctime))
    entries.sort(key=operator.itemgetter(2), reverse=True)
    return entries
# Delete files
# Delete files
def DeleteFiles(file_list):
    """Delete the oldest media files until the folder fits under the
    configured media/max_folder_size.

    *file_list* holds (path, size, ctime) tuples sorted newest-first
    (as produced by GenFileList), so popping from the end removes the
    oldest file. The list is mutated in place.
    """
    # Total size of everything currently tracked.
    size = 0
    for entry in file_list:
        size += entry[1]
    # (Removed the unused local "del_index" from the original.)
    # Hoist the loop-invariant size limit out of the loop.
    max_size = int(config.get("media","max_folder_size"))
    while size > max_size:
        file_to_delete = file_list.pop()
        Log( LogLevel.DEBUG, "Deleting file: " + file_to_delete[0] )
        os.remove( file_to_delete[0] )
        size -= file_to_delete[1]
# in-place prettyprint formatter (from http://effbot.org/zone/element-lib.htm#prettyprint)
# in-place prettyprint formatter (from http://effbot.org/zone/element-lib.htm#prettyprint)
def Indent(elem, level=0):
    """Add whitespace to *elem* and its descendants, in place, so the
    serialised XML is indented one space per nesting level."""
    i = "\n" + level*" "
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = i + " "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for elem in elem:  # deliberately rebinds elem to each child in turn
            Indent(elem, level+1)
        # after the loop, elem is the LAST child; give it the closing indent
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
    else:
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = i
# Generate the XML podcast file
def GenXmlFile(file_list):
    """Write the podcast RSS 2.0 XML for *file_list* to the configured
    podcast/out_file.

    *file_list* holds (path, size, ctime) tuples as produced by
    GenFileList().
    """
    import xml.etree.ElementTree as ET
    # build rss feed structure
    rss = ET.Element("rss")
    rss.set("xmlns:itunes", "http://www.itunes.com/dtds/podcast-1.0.dtd")
    rss.set("version", "2.0")
    # channel element
    channel = ET.SubElement(rss, "channel")
    # Add title
    title = ET.SubElement(channel, "title")
    title.text = config.get("podcast","title")
    # Add description
    desc = ET.SubElement(channel, "description")
    desc.text = "Feedmaker Podcast"
    # Loop through the mp3 files
    for file in file_list:
        Log( LogLevel.DEBUG, "Adding: " + file[0] )
        # determine url of file (percent-encode the basename for the URL)
        url = config.get("podcast","url_base") + urllib.quote( os.path.basename(file[0]) )
        # Add an item; the file URL doubles as the guid
        item = ET.SubElement(channel, "item")
        item_title = ET.SubElement(item, "title")
        item_title.text = os.path.basename(file[0])
        item_enclosure = ET.SubElement(item, "enclosure")
        item_enclosure.set("url", url)
        item_enclosure.set("type", config.get("media","mime_type"))
        item_guid = ET.SubElement(item, "guid")
        item_guid.text = url
        item_pubdate = ET.SubElement(item, "pubDate")
        # NOTE(review): time.ctime is not the RFC-822-style date RSS 2.0
        # expects -- confirm podcast clients tolerate this format.
        item_pubdate.text = time.ctime(os.path.getctime(file[0]))
    # cleanup the tree
    Indent(rss)
    # wrap it in an ElementTree instance
    tree = ET.ElementTree(rss)
    # write to XML file
    tree.write(config.get("podcast","out_file"))
# Parse the command line options
def ParseArgs():
    """Parse the command-line options into the module-global *args*."""
    global args
    default_config_path = os.path.join( sys.path[0], "feedmaker.conf" )
    parser = argparse.ArgumentParser(description='Create podcast from radio stream.')
    # Boolean flags, registered in the original order so --help is unchanged.
    for flag, dest, text in (
            ('-x', 'xml_only', 'Generate the XML file only'),
            ('-v', 'verbose', 'Verbose'),
            ('-q', 'quiet', 'Quiet mode')):
        parser.add_argument(flag, action='store_true', default=False, dest=dest, help=text)
    parser.add_argument('-c', action='store', default=default_config_path, dest='conf_file', help='Config file')
    parser.add_argument('-w', action='store_true', default=False, dest='write_conf_file', help='Write the config file with defaults')
    args = parser.parse_args()
# Define loging level
# Define loging level
class LogLevel:
    """Symbolic console logging levels, in increasing verbosity."""
    ERROR=1
    INFO=2
    DEBUG=3
# Log to the console using different log levels
def Log(level, log_string):
    """Write log_string to stdout subject to the level and global CLI flags.

    ERROR is always printed with an "ERROR - " prefix; INFO is suppressed by
    -q (args.quiet); DEBUG is printed only with -v (args.verbose).
    Raises Exception for an unrecognized level.
    """
    # Parenthesized single-argument print: identical output on Python 2,
    # and also valid on Python 3 (the original bare print statement was not).
    if level == LogLevel.ERROR:
        print("ERROR - " + log_string)
    elif level == LogLevel.INFO:
        if not args.quiet:
            print(log_string)
    elif level == LogLevel.DEBUG:
        if args.verbose:
            print(log_string)
    else:
        raise Exception("Unknown logging level")
# main entry point
def main():
    """Drive a full run: parse args, read config, rip, prune, publish XML."""
    ParseArgs()
    ReadConfig()
    # Skip the (slow) stream rip when only the XML output was requested.
    if not args.xml_only:
        Log(LogLevel.INFO, "Ripping stream ...")
        RipStream()
    Log(LogLevel.INFO, "Getting file list from folder " + config.get("media","location") + " ...")
    flist = GenFileList()
    Log(LogLevel.INFO, "Deleting old files ...")
    DeleteFiles(flist)
    Log(LogLevel.INFO, "Generating " + config.get("podcast","out_file") + " ...")
    GenXmlFile(flist)
    Log(LogLevel.INFO, "Done.")
    return 0
# Run main() only when executed directly, not when imported as a module.
if __name__ == '__main__':
    main()
| Python |
"""
Copyright (C) 2009, Jackie Ng
http://code.google.com/p/fdotoolbox, jumpinjackie@gmail.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
See license.txt for more/additional licensing information
"""
import clr
clr.AddReference("System.Drawing")
import System
import System.Drawing
import System.Windows.Forms
import System.Collections.Generic
from System import String
from System.Drawing import *
from System.Windows.Forms import *
from System.Collections.Generic import List, SortedList
from OSGeo.FDO.Schema import PropertyType
from OSGeo.FDO.Commands import CommandType
from FdoToolbox.Core.Feature import FeatureQueryOptions, FeatureAggregateOptions, CapabilityType
class ThemeOptionsDialog(Form):
    """WinForms dialog collecting the settings for a themed bulk copy.

    The user selects a source connection/schema/class, a target SDF file and
    a theme property; pressing Preview fills the rules grid with one
    (property value, generated class name, source filter) row per distinct
    value of that property.
    """
    def __init__(self, connMgr):
        # connMgr: the host's connection manager; queried for connection
        # names on load and for connections on selection changes.
        self.InitializeComponent()
        self._connMgr = connMgr
    def InitializeComponent(self):
        """Create and lay out all controls.

        Designer-style initialization: the Suspend/ResumeLayout bracketing
        and the property-assignment order follow the WinForms designer
        convention — do not reorder.
        """
        self._label1 = System.Windows.Forms.Label()
        self._cmbConnection = System.Windows.Forms.ComboBox()
        self._groupBox1 = System.Windows.Forms.GroupBox()
        self._groupBox2 = System.Windows.Forms.GroupBox()
        self._label3 = System.Windows.Forms.Label()
        self._cmbSchema = System.Windows.Forms.ComboBox()
        self._label4 = System.Windows.Forms.Label()
        self._cmbClass = System.Windows.Forms.ComboBox()
        self._groupBox3 = System.Windows.Forms.GroupBox()
        self._label2 = System.Windows.Forms.Label()
        self._txtSdf = System.Windows.Forms.TextBox()
        self._btnBrowse = System.Windows.Forms.Button()
        self._label5 = System.Windows.Forms.Label()
        self._cmbProperty = System.Windows.Forms.ComboBox()
        self._label6 = System.Windows.Forms.Label()
        self._label7 = System.Windows.Forms.Label()
        self._txtClassPrefix = System.Windows.Forms.TextBox()
        self._txtClassSuffix = System.Windows.Forms.TextBox()
        self._btnPreview = System.Windows.Forms.Button()
        self._grdRules = System.Windows.Forms.DataGridView()
        self._btnOK = System.Windows.Forms.Button()
        self._btnCancel = System.Windows.Forms.Button()
        self._COL_VALUE = System.Windows.Forms.DataGridViewTextBoxColumn()
        self._COL_CLASS = System.Windows.Forms.DataGridViewTextBoxColumn()
        self._COL_FILTER = System.Windows.Forms.DataGridViewTextBoxColumn()
        self._groupBox1.SuspendLayout()
        self._groupBox2.SuspendLayout()
        self._groupBox3.SuspendLayout()
        self._grdRules.BeginInit()
        self.SuspendLayout()
        #
        # label1
        #
        self._label1.Location = System.Drawing.Point(18, 27)
        self._label1.Name = "label1"
        self._label1.Size = System.Drawing.Size(100, 14)
        self._label1.TabIndex = 0
        self._label1.Text = "Connection"
        #
        # cmbConnection
        #
        self._cmbConnection.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
        self._cmbConnection.FormattingEnabled = True
        self._cmbConnection.Location = System.Drawing.Point(18, 44)
        self._cmbConnection.Name = "cmbConnection"
        self._cmbConnection.Size = System.Drawing.Size(212, 21)
        self._cmbConnection.TabIndex = 1
        self._cmbConnection.SelectedIndexChanged += self.CmbConnectionSelectedIndexChanged
        #
        # groupBox1
        #
        self._groupBox1.Controls.Add(self._label4)
        self._groupBox1.Controls.Add(self._cmbClass)
        self._groupBox1.Controls.Add(self._label3)
        self._groupBox1.Controls.Add(self._cmbSchema)
        self._groupBox1.Controls.Add(self._label1)
        self._groupBox1.Controls.Add(self._cmbConnection)
        self._groupBox1.Location = System.Drawing.Point(12, 13)
        self._groupBox1.Name = "groupBox1"
        self._groupBox1.Size = System.Drawing.Size(255, 159)
        self._groupBox1.TabIndex = 2
        self._groupBox1.TabStop = False
        self._groupBox1.Text = "Source"
        #
        # groupBox2
        #
        self._groupBox2.Controls.Add(self._btnBrowse)
        self._groupBox2.Controls.Add(self._txtSdf)
        self._groupBox2.Controls.Add(self._label2)
        self._groupBox2.Location = System.Drawing.Point(273, 13)
        self._groupBox2.Name = "groupBox2"
        self._groupBox2.Size = System.Drawing.Size(268, 159)
        self._groupBox2.TabIndex = 3
        self._groupBox2.TabStop = False
        self._groupBox2.Text = "Target"
        #
        # label3
        #
        self._label3.Location = System.Drawing.Point(18, 68)
        self._label3.Name = "label3"
        self._label3.Size = System.Drawing.Size(100, 14)
        self._label3.TabIndex = 2
        self._label3.Text = "Schema"
        #
        # cmbSchema
        #
        self._cmbSchema.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
        self._cmbSchema.FormattingEnabled = True
        self._cmbSchema.Location = System.Drawing.Point(18, 85)
        self._cmbSchema.Name = "cmbSchema"
        self._cmbSchema.Size = System.Drawing.Size(212, 21)
        self._cmbSchema.TabIndex = 3
        self._cmbSchema.SelectedIndexChanged += self.CmbSchemaSelectedIndexChanged
        #
        # label4
        #
        self._label4.Location = System.Drawing.Point(18, 109)
        self._label4.Name = "label4"
        self._label4.Size = System.Drawing.Size(100, 14)
        self._label4.TabIndex = 4
        self._label4.Text = "Class"
        #
        # cmbClass
        #
        self._cmbClass.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
        self._cmbClass.FormattingEnabled = True
        self._cmbClass.Location = System.Drawing.Point(18, 126)
        self._cmbClass.Name = "cmbClass"
        self._cmbClass.Size = System.Drawing.Size(212, 21)
        self._cmbClass.TabIndex = 5
        self._cmbClass.SelectedIndexChanged += self.CmbClassSelectedIndexChanged
        #
        # groupBox3
        #
        self._groupBox3.Controls.Add(self._grdRules)
        self._groupBox3.Controls.Add(self._btnPreview)
        self._groupBox3.Controls.Add(self._txtClassSuffix)
        self._groupBox3.Controls.Add(self._txtClassPrefix)
        self._groupBox3.Controls.Add(self._label7)
        self._groupBox3.Controls.Add(self._label6)
        self._groupBox3.Controls.Add(self._cmbProperty)
        self._groupBox3.Controls.Add(self._label5)
        self._groupBox3.Location = System.Drawing.Point(12, 179)
        self._groupBox3.Name = "groupBox3"
        self._groupBox3.Size = System.Drawing.Size(529, 173)
        self._groupBox3.TabIndex = 4
        self._groupBox3.TabStop = False
        self._groupBox3.Text = "Theme Settings"
        #
        # label2
        #
        self._label2.Location = System.Drawing.Point(17, 27)
        self._label2.Name = "label2"
        self._label2.Size = System.Drawing.Size(103, 14)
        self._label2.TabIndex = 0
        self._label2.Text = "SDF File"
        #
        # txtSdf
        #
        self._txtSdf.Location = System.Drawing.Point(17, 44)
        self._txtSdf.Name = "txtSdf"
        self._txtSdf.Size = System.Drawing.Size(211, 20)
        self._txtSdf.TabIndex = 1
        #
        # btnBrowse
        #
        self._btnBrowse.Location = System.Drawing.Point(234, 42)
        self._btnBrowse.Name = "btnBrowse"
        self._btnBrowse.Size = System.Drawing.Size(25, 23)
        self._btnBrowse.TabIndex = 2
        self._btnBrowse.Text = "..."
        self._btnBrowse.UseVisualStyleBackColor = True
        self._btnBrowse.Click += self.BtnBrowseClick
        #
        # label5
        #
        self._label5.Location = System.Drawing.Point(18, 22)
        self._label5.Name = "label5"
        self._label5.Size = System.Drawing.Size(100, 17)
        self._label5.TabIndex = 0
        self._label5.Text = "Theme Property"
        #
        # cmbProperty
        #
        self._cmbProperty.DropDownStyle = System.Windows.Forms.ComboBoxStyle.DropDownList
        self._cmbProperty.FormattingEnabled = True
        self._cmbProperty.Location = System.Drawing.Point(109, 19)
        self._cmbProperty.Name = "cmbProperty"
        self._cmbProperty.Size = System.Drawing.Size(411, 21)
        self._cmbProperty.TabIndex = 1
        #
        # label6
        #
        self._label6.Location = System.Drawing.Point(18, 51)
        self._label6.Name = "label6"
        self._label6.Size = System.Drawing.Size(120, 15)
        self._label6.TabIndex = 2
        self._label6.Text = "Generated Class Name"
        #
        # label7
        #
        self._label7.Location = System.Drawing.Point(227, 51)
        self._label7.Name = "label7"
        self._label7.Size = System.Drawing.Size(110, 15)
        self._label7.TabIndex = 3
        self._label7.Text = "[PROPERTY VALUE]"
        #
        # txtClassPrefix
        #
        self._txtClassPrefix.Location = System.Drawing.Point(155, 48)
        self._txtClassPrefix.Name = "txtClassPrefix"
        self._txtClassPrefix.Size = System.Drawing.Size(66, 20)
        self._txtClassPrefix.TabIndex = 4
        #
        # txtClassSuffix
        #
        self._txtClassSuffix.Location = System.Drawing.Point(343, 48)
        self._txtClassSuffix.Name = "txtClassSuffix"
        self._txtClassSuffix.Size = System.Drawing.Size(66, 20)
        self._txtClassSuffix.TabIndex = 5
        #
        # btnPreview
        #
        self._btnPreview.Location = System.Drawing.Point(445, 46)
        self._btnPreview.Name = "btnPreview"
        self._btnPreview.Size = System.Drawing.Size(75, 23)
        self._btnPreview.TabIndex = 6
        self._btnPreview.Text = "Preview"
        self._btnPreview.UseVisualStyleBackColor = True
        self._btnPreview.Click += self.BtnPreviewClick
        #
        # grdRules
        #
        self._grdRules.AllowUserToAddRows = False
        self._grdRules.AllowUserToDeleteRows = False
        self._grdRules.ColumnHeadersHeightSizeMode = System.Windows.Forms.DataGridViewColumnHeadersHeightSizeMode.AutoSize
        self._grdRules.Columns.AddRange(System.Array[System.Windows.Forms.DataGridViewColumn](
            [self._COL_VALUE,
            self._COL_CLASS,
            self._COL_FILTER]))
        self._grdRules.Location = System.Drawing.Point(18, 75)
        self._grdRules.Name = "grdRules"
        self._grdRules.ReadOnly = True
        self._grdRules.RowHeadersVisible = False
        self._grdRules.Size = System.Drawing.Size(502, 92)
        self._grdRules.TabIndex = 7
        #
        # btnOK
        #
        self._btnOK.Location = System.Drawing.Point(385, 362)
        self._btnOK.Name = "btnOK"
        self._btnOK.Size = System.Drawing.Size(75, 23)
        self._btnOK.TabIndex = 5
        self._btnOK.Text = "OK"
        self._btnOK.UseVisualStyleBackColor = True
        self._btnOK.Click += self.BtnOKClick
        #
        # btnCancel
        #
        self._btnCancel.DialogResult = System.Windows.Forms.DialogResult.Cancel
        self._btnCancel.Location = System.Drawing.Point(466, 362)
        self._btnCancel.Name = "btnCancel"
        self._btnCancel.Size = System.Drawing.Size(75, 23)
        self._btnCancel.TabIndex = 6
        self._btnCancel.Text = "Cancel"
        self._btnCancel.UseVisualStyleBackColor = True
        self._btnCancel.Click += self.BtnCancelClick
        #
        # COL_VALUE
        #
        self._COL_VALUE.DataPropertyName = "Value"
        self._COL_VALUE.HeaderText = "Property Value"
        self._COL_VALUE.Name = "COL_VALUE"
        self._COL_VALUE.ReadOnly = True
        #
        # COL_CLASS
        #
        self._COL_CLASS.AutoSizeMode = System.Windows.Forms.DataGridViewAutoSizeColumnMode.Fill
        self._COL_CLASS.DataPropertyName = "ClassName"
        self._COL_CLASS.HeaderText = "Generated Class Name"
        self._COL_CLASS.Name = "COL_CLASS"
        self._COL_CLASS.ReadOnly = True
        #
        # COL_FILTER
        #
        self._COL_FILTER.DataPropertyName = "Filter"
        self._COL_FILTER.HeaderText = "Source Filter"
        self._COL_FILTER.Name = "COL_FILTER"
        self._COL_FILTER.ReadOnly = True
        #
        # Form1
        #
        self.AcceptButton = self._btnOK
        self.CancelButton = self._btnCancel
        self.ClientSize = System.Drawing.Size(553, 397)
        self.ControlBox = False
        self.Controls.Add(self._btnCancel)
        self.Controls.Add(self._btnOK)
        self.Controls.Add(self._groupBox3)
        self.Controls.Add(self._groupBox2)
        self.Controls.Add(self._groupBox1)
        self.Name = "Form1"
        self.Text = "Theme Options"
        self.Load += self.ThemeOptionsDialogLoad
        self._groupBox1.ResumeLayout(False)
        self._groupBox2.ResumeLayout(False)
        self._groupBox2.PerformLayout()
        self._groupBox3.ResumeLayout(False)
        self._groupBox3.PerformLayout()
        self._grdRules.EndInit()
        self.ResumeLayout(False)
    def BtnCancelClick(self, sender, e):
        """Close the dialog reporting cancellation."""
        self.DialogResult = DialogResult.Cancel
        pass
    def BtnOKClick(self, sender, e):
        """Close the dialog reporting acceptance."""
        self.DialogResult = DialogResult.OK
        pass
    def ThemeOptionsDialogLoad(self, sender, e):
        """On load, fill the connection combo from the connection manager."""
        sourceConns = List[str](self._connMgr.GetConnectionNames())
        if sourceConns.Count > 0:
            self._cmbConnection.DataSource = sourceConns
            self._cmbConnection.SelectedIndex = 0
        pass
    def GetConnection(self):
        """Return the currently selected FDO connection, or None."""
        if self._cmbConnection.SelectedItem is None:
            return None
        connName = self.GetConnectionName()
        conn = self._connMgr.GetConnection(connName)
        return conn
    def GetConnectionName(self):
        # Raises if nothing is selected; GetConnection guards for that.
        return self._cmbConnection.SelectedItem.ToString()
    def GetSchemaName(self):
        return self._cmbSchema.SelectedItem.ToString()
    def GetClassName(self):
        return self._cmbClass.SelectedItem.ToString()
    def GetPropertyName(self):
        return self._cmbProperty.SelectedItem.ToString()
    def GetThemeRules(self):
        # NOTE(review): BtnPreviewClick fills the grid via Rows.Add and never
        # assigns DataSource, so this presumably returns None — verify what
        # callers actually expect here.
        return self._grdRules.DataSource
    def GetSdfFile(self):
        return self._txtSdf.Text
    def CmbConnectionSelectedIndexChanged(self, sender, e):
        """Repopulate the schema combo for the newly selected connection."""
        conn = self.GetConnection()
        if conn is None:
            return
        svc = conn.CreateFeatureService()
        try:
            schemaNames = svc.GetSchemaNames()
            if schemaNames.Count > 0:
                self._cmbSchema.DataSource = schemaNames
                self._cmbSchema.SelectedIndex = 0
        finally:
            svc.Dispose()
        pass
    def CmbSchemaSelectedIndexChanged(self, sender, e):
        """Repopulate the class combo for the newly selected schema."""
        conn = self.GetConnection()
        if conn is None:
            return
        schemaName = self.GetSchemaName()
        if String.IsNullOrEmpty(schemaName):
            return
        svc = conn.CreateFeatureService()
        try:
            classNames = svc.GetClassNames(schemaName)
            if classNames.Count > 0:
                self._cmbClass.DataSource = classNames
                self._cmbClass.SelectedIndex = 0
        finally:
            svc.Dispose()
        pass
    def CmbClassSelectedIndexChanged(self, sender, e):
        """Repopulate the theme-property combo with the data properties of
        the newly selected class (non-data properties are skipped)."""
        conn = self.GetConnection()
        if conn is None:
            return
        schemaName = self.GetSchemaName()
        if String.IsNullOrEmpty(schemaName):
            return
        className = self.GetClassName()
        if String.IsNullOrEmpty(className):
            return
        svc = conn.CreateFeatureService()
        try:
            cls = svc.GetClassByName(schemaName, className)
            if not cls is None:
                properties = List[str]()
                for prop in cls.Properties:
                    # Only plain data properties can drive a theme.
                    if prop.PropertyType == PropertyType.PropertyType_DataProperty:
                        properties.Add(prop.Name)
                if properties.Count > 0:
                    self._cmbProperty.DataSource = properties
                    self._cmbProperty.SelectedIndex = 0
        finally:
            svc.Dispose()
        pass
    def BtnPreviewClick(self, sender, e):
        """Fill the rules grid with one row per distinct theme-property value.

        Uses the cheapest strategy the provider supports: SELECT DISTINCT
        aggregate, then raw SQL, then a brute-force full scan (only after
        asking the user to confirm).
        """
        conn = self.GetConnection()
        if conn is None:
            return
        className = self.GetClassName()
        if String.IsNullOrEmpty(className):
            return
        propName = self.GetPropertyName()
        if String.IsNullOrEmpty(propName):
            return
        svc = conn.CreateFeatureService()
        propertyValues = List[str]()
        try:
            canDistinct = conn.Capability.GetBooleanCapability(CapabilityType.FdoCapabilityType_SupportsSelectDistinct)
            if canDistinct:
                #SortedList not only allows us to hackishly get set-like qualities, but we get sorting for free.
                values = SortedList[str, str]()
                query = FeatureAggregateOptions(className)
                query.AddFeatureProperty(propName)
                query.Distinct = True
                reader = svc.SelectAggregates(query)
                try:
                    while reader.ReadNext():
                        if not reader.IsNull(propName):
                            values.Add(reader[propName].ToString(), String.Empty)
                finally:
                    reader.Dispose()
                propertyValues.AddRange(values.Keys)
            elif svc.SupportsCommand(CommandType.CommandType_SQLCommand):
                sql = "SELECT DISTINCT " + propName + " FROM " + className + " ORDER BY " + propName
                values = List[str]()
                reader = svc.ExecuteSQLQuery(sql)
                try:
                    while reader.ReadNext():
                        if not reader.IsNull(propName):
                            values.Add(reader[propName].ToString())
                finally:
                    reader.Dispose()
                propertyValues.AddRange(values)
            else:
                if App.Ask("Get Values", "About to fetch distinct values by brute force. Continue?"):
                    #SortedList not only allows us to hackishly get set-like qualities, but we get sorting for free.
                    values = SortedList[str, str]()
                    query = FeatureQueryOptions(className)
                    query.AddFeatureProperty(propName)
                    reader = svc.SelectFeatures(query)
                    try:
                        while reader.ReadNext():
                            if not reader.IsNull(propName):
                                values.Add(reader[propName].ToString(), String.Empty)
                    finally:
                        reader.Dispose()
                    propertyValues.AddRange(values.Keys)
        finally:
            svc.Dispose()
        if propertyValues.Count > 0:
            self._grdRules.Rows.Clear()
            for pval in propertyValues:
                # NOTE(review): rebinds the local className used above; the
                # source class name is no longer needed at this point, but a
                # distinct name would be clearer.
                className = self._txtClassPrefix.Text + pval + self._txtClassSuffix.Text
                filter = propName + " = '" + pval + "'"
                self._grdRules.Rows.Add(pval, className, filter)
        pass
    def BtnBrowseClick(self, sender, e):
        """Prompt the user for the target SDF file path."""
        save = SaveFileDialog()
        save.Filter = "SDF files (*.sdf)|*.sdf"
        if save.ShowDialog() == DialogResult.OK:
            self._txtSdf.Text = save.FileName
        pass
"""
Copyright (C) 2009, Jackie Ng
http://code.google.com/p/fdotoolbox, jumpinjackie@gmail.com
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
See license.txt for more/additional licensing information
"""
# themebuilder.py
#
# FDO Toolbox script to create themed bulk copy operations.
#
# For example, suppose we want to create a themed data set that based on the
# attribute RTYPE (values: RES, COM, IND) this script will create a bulk copy
# operation with the following tasks:
#
# - Copy matching features to [FeatureClass]_RES where RTYPE = 'RES'
# - Copy matching features to [FeatureClass]_COM where RTYPE = 'COM'
# - Copy matching features to [FeatureClass]_IND where RTYPE = 'IND'
#
# Author: Jackie Ng (jumpinjackie@gmail.com)
import themedialog
from themedialog import ThemeOptionsDialog
def Run():
try:
diag = ThemeOptionsDialog(App.ConnectionManager)
diag.ShowDialog()
except Exception, ex:
App.ShowMessage("Error", ex.ToString())
Run() | Python |
# Copyright (C) 2009, Jackie Ng
# http://code.google.com/p/fdotoolbox, jumpinjackie@gmail.com
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
# See license.txt for more/additional licensing information
# startup.py - Startup script for FDO Toolbox
#
# Performs any required startup logic/tasks.
#
# TODO: Eventually move all FDO Toolbox startup logic here.
def Run():
    """Startup hook invoked by FDO Toolbox when the host application loads."""
    # Announce ourselves so the host console shows the script actually ran.
    App.WriteLine("Running startup script")
    # Add any startup logic here ...
Run()
# sdf2convert.py
#
# FDO Toolbox script to convert MapGuide 6.5 SDF files (SDF 2.0) to any FDO flat-file
# data store.
#
# Author: Jackie Ng (jumpinjackie@gmail.com)
#
# TODO: Utilise MgCoordinateSystem API if available to perform CS lookups.
from System import Activator, Type, Exception, String, Array
from System.IO import Path
from System.Collections.Generic import *
from System.Runtime.InteropServices import COMException
from FdoToolbox.Base.Forms import ProgressDialog
from FdoToolbox.Core.Utility import ExpressUtility
from OSGeo.FDO.Schema import *
from OSGeo.FDO.Expression import *
from OSGeo.FDO.Geometry import *
from OSGeo.FDO.Commands.SpatialContext import SpatialContextExtentType
# FDO Dimensionality Flags
FDO_DIM_XY = 0
FDO_DIM_Z = 1
FDO_DIM_M = 2
# SDF Component Toolkit flags (file open modes)
sdfOpenRead = 1
sdfOpenUpdate = 2
sdfCreateNew = 4
sdfCreateAlways = 8
sdfOpenAlways = 16
sdfOpenExisting = 32
# SDF Object Type flags (geometry type of a stored SDF object)
sdfUndefinedObject = -1
sdfPointObject = 0
sdfPolylineObject = 2
sdfPolyPolylineObject = 3
sdfPolygonObject = 4
sdfPolyPolygonObject = 5
sdfDeletedObject = 10
# SDF error codes (from SDF Component Toolkit documentation).
# Keys are HRESULTs formatted as "0x" + upper-case hex — exactly the format
# produced by GetComErrorMsg, so the key format must not change.
errorCodes = Dictionary[str,str]()
#errorCodes["0x00000000"] = "sdfOk"
#errorCodes["0x00041000"] = "sdfEof"
#errorCodes["0x00041000"] = "sdfSearchComplete"
errorCodes["0x8004100F"] = "sdfBadEof"
errorCodes["0x8004100E"] = "sdfBadFormat"
errorCodes["0x80041014"] = "sdfContainsNoObjects"
errorCodes["0x8004100C"] = "sdfElementAlreadyDefined"
errorCodes["0x80041013"] = "sdfIllegalFunctionCall"
errorCodes["0x80041020"] = "sdfInvalidCoordSysCode"
errorCodes["0x8004100B"] = "sdfInvalidElement"
errorCodes["0x80041008"] = "sdfInvalidOffset"
errorCodes["0x80041007"] = "sdfInvalidParameter"
errorCodes["0x8004100A"] = "sdfInvalidPrecision"
errorCodes["0x80041009"] = "sdfInvalidType"
errorCodes["0x80041001"] = "sdfNotAnSdf"
errorCodes["0x80041011"] = "sdfNotFound"
errorCodes["0x80041010"] = "sdfNotSupported"
errorCodes["0x80041019"] = "sdfObjectConstructionFailed"
errorCodes["0x80041015"] = "sdfObjectNotFound"
errorCodes["0x80041018"] = "sdfOutOfBounds"
errorCodes["0x80041012"] = "sdfOutOfMemory"
errorCodes["0x80041016"] = "sdfPolygonUnclosed"
errorCodes["0x80041017"] = "sdfPrecisionMismatch"
errorCodes["0x80041002"] = "sdfReadError"
errorCodes["0x80041005"] = "sdfReadOnly"
errorCodes["0x80041006"] = "sdfSearchNotInitialized"
errorCodes["0x8004100D"] = "sdfTooManyPoints"
errorCodes["0x80041000"] = "sdfUnableToOpen"
errorCodes["0x80041004"] = "sdfVersionMismatch"
errorCodes["0x80041003"] = "sdfWriteError"
#errorCodes["0x00000000"] = "sifOk"
#errorCodes["0x00041000"] = "sifSearchComplete"
#errorCodes["0x00041000"] = "sifTraversalComplete"
errorCodes["0x80042009"] = "sifBadNodeExtent"
errorCodes["0x8004200B"] = "sifBadNodeOffset"
errorCodes["0x8004200A"] = "sifBadObjectCount"
errorCodes["0x8004200D"] = "sifCantCreateTempFile"
errorCodes["0x8004200F"] = "sifCantReplaceSif"
errorCodes["0x80042007"] = "sifInvalidParameter"
errorCodes["0x8004200C"] = "sifNodeUnderflow"
errorCodes["0x80042001"] = "sifNotAnSif"
errorCodes["0x80042008"] = "sifObjectNotFound"
errorCodes["0x80042002"] = "sifReadError"
errorCodes["0x80042005"] = "sifReadOnly"
errorCodes["0x80042006"] = "sifSearchNotInitialized"
errorCodes["0x8004200E"] = "sifTempFileWriteError"
# NOTE(review): duplicate key — this overwrites sifSearchNotInitialized above;
# one of the two entries presumably carries the wrong code. TODO confirm
# against the SDF Component Toolkit documentation.
errorCodes["0x80042006"] = "sifTraversalNotInitialized"
errorCodes["0x80042000"] = "sifUnableToOpen"
errorCodes["0x80042004"] = "sifVersionMismatch"
errorCodes["0x80042003"] = "sifWriteError"
#errorCodes["0x00000000"] = "kifOk"
#errorCodes["0x00041000"] = "kifSearchComplete"
errorCodes["0x8004300B"] = "kifBadNodeOffset"
errorCodes["0x8004300A"] = "kifBadObjectCount"
errorCodes["0x8004300D"] = "kifCantCreateTempFile"
errorCodes["0x8004300F"] = "kifCantReplaceKif"
errorCodes["0x80043009"] = "kifDuplicateObject"
errorCodes["0x80043007"] = "kifInvalidParameter"
errorCodes["0x80043011"] = "kifKeysNotOrdered"
errorCodes["0x80043010"] = "kifKeyTooLong"
errorCodes["0x8004300C"] = "kifNodeUnderflow"
errorCodes["0x80043001"] = "kifNotAKif"
errorCodes["0x80043008"] = "kifObjectNotFound"
errorCodes["0x80043002"] = "kifReadError"
errorCodes["0x80043005"] = "kifReadOnly"
errorCodes["0x80043006"] = "kifSearchNotInitialized"
errorCodes["0x8004300E"] = "kifTempFileWriteError"
errorCodes["0x80043000"] = "kifUnableToOpen"
errorCodes["0x80043004"] = "kifVersionMismatch"
errorCodes["0x80043003"] = "kifWriteError"
# COM ProgID of the MapGuide SDF Component Toolkit
SDF_TOOLKIT_PROGID = "Autodesk.MgSdfToolkit"
# Name of the FDO feature schema created for converted SDF2 data
SDF2_SCHEMA = "SDF2Schema"
class Sdf2ConversionException(Exception):
    """Error raised when an SDF 2.0 -> FDO flat-file conversion fails.

    BUG FIX: the original defined __init__ three times; Python keeps only
    the last definition, so the no-argument and message-only forms used by
    Convert() raised TypeError. A single initializer with optional
    arguments covers all three call shapes.
    """
    def __init__(self, message=None, innerException=None):
        # Dispatch to the matching base constructor overload so behavior is
        # unchanged for each of the three original call shapes.
        if innerException is not None:
            Exception.__init__(self, message, innerException)
        elif message is not None:
            Exception.__init__(self, message)
        else:
            Exception.__init__(self)
"""
Creates a COM object from the given prog ID
"""
def CreateObject(progId):
type = Type.GetTypeFromProgID(progId)
return Activator.CreateInstance(type)
"""
Gets the error message from a COM Exception based on the HRESULT error code
"""
def GetComErrorMsg(hresult):
codeStr = String.Format("0x{0:X}", hresult)
return errorCodes[codeStr] if errorCodes.ContainsKey(codeStr) else ""
"""
Creates a LL84 spatial context
"""
def CreateLL84SpatialContext():
sc = SpatialContextInfo()
sc.Name = "Default"
sc.Description = "Lat-Long WGS84"
sc.CoordinateSystem = "GEOGCS['LL84',DATUM['WGS84',SPHEROID['WGS84',6378137.000,298.25722293]],PRIMEM['Greenwich',0],UNIT['Degree',0.01745329251994]]"
sc.CoordinateSystemWkt = sc.CoordinateSystem
sc.ExtentType = SpatialContextExtentType.SpatialContextExtentType_Dynamic
sc.IsActive = True
sc.XYTolerance = 0.001
sc.ZTolerance = 0.001
return sc
"""
Creates the SDF2 FDO feature schema
"""
def CreateSdf2Schema(className):
schema = FeatureSchema(SDF2_SCHEMA, "")
fc = FeatureClass(className, "")
id = DataPropertyDefinition("Autogenerated_SDF_ID", "")
name = DataPropertyDefinition("Name", "")
key = DataPropertyDefinition("Key", "")
url = DataPropertyDefinition("Url", "")
geom = GeometryPropertyDefinition("Geom", "")
id.DataType = DataType.DataType_Int32
name.DataType = DataType.DataType_String
key.DataType = DataType.DataType_String
url.DataType = DataType.DataType_String
name.Length = 255
key.Length = 255
url.Length = 255
id.Nullable = False
name.Nullable = True
key.Nullable = False
url.Nullable = True
geom.GeometryTypes = GeometricType.GeometricType_All
geom.SpatialContextAssociation = "Default"
fc.Properties.Add(id)
fc.Properties.Add(name)
fc.Properties.Add(key)
fc.Properties.Add(url)
fc.Properties.Add(geom)
fc.IdentityProperties.Add(id)
fc.GeometryProperty = geom
schema.Classes.Add(fc)
return schema
"""
Converts a SdfGeometry to a FGF geometry
"""
def ConvertToFgf(sdfGeometry, sdfGeometryType, geomFactory):
if sdfGeometry.IsEmpty():
return None
if sdfGeometryType == sdfPointObject:
sdfPoint = sdfGeometry.GetAt(0)
pos = geomFactory.CreatePositionXY(sdfPoint.X, sdfPoint.Y)
pt = geomFactory.CreatePoint(pos)
fgf = geomFactory.GetFgf(pt)
pt.Dispose()
pos.Dispose()
return GeometryValue(fgf)
elif sdfGeometryType == sdfPolylineObject:
ordinates = Array[str](range(sdfGeometry.TotalPoints * 2))
# Should only have one segment
sdfGeomSeg = sdfGeometry.GetAt(0)
for i in range(0, sdfGeomSeg.Size - 1):
sdfPoint = sdfGeomSeg.GetAt(i)
ordinates[i*2] = sdfPoint.X
ordinates[i*2+1] = sdfPoint.Y
linestr = geomFactory.CreateLineString(FDO_DIM_XY, ordinates.Length, ordinates)
fgf = geomFactory.GetFgf(linestr)
linestr.Dispose()
return GeometryValue(fgf)
elif sdfGeometryType == sdfPolyPolylineObject:
lineStringCol = LineStringCollection()
for i in range(0, sdfGeometry.Size - 1):
ordinates = Array[str](range(sdfGeometry.TotalPoints * 2))
# Should only have one segment
sdfGeomSeg = sdfGeometry.GetAt(i)
for j in range(0, sdfGeomSeg.Size - 1):
sdfPoint = sdfGeomSeg.GetAt(j)
ordinates[j*2] = sdfPoint.X
ordinates[j*2+1] = sdfPoint.Y
linestr = geomFactory.CreateLineString(FDO_DIM_XY, ordinates.Length, ordinates)
lineStringCol.Add(linestr)
multiLine = geomFactory.CreateMultiLineString(lineStringCol)
fgf = geomFactory.GetFgf(fgf)
multiLine.Dispose()
lineStringCol.Dispose()
return GeometryValue(fgf)
elif sdfGeometryType == sdfPolygonObject:
# GUESS: SDF Com Tk docs doesn't really specify how polygons are to be interpreted
# so, I'm going on the assumption here the first segment is the exterior ring and
# every segment after are the interior rings
innerRings = LinearRingCollection()
# Process outer ring
ordinates = Array[str](range(sdfGeometry.TotalPoints * 2))
sdfGeomSeg = sdfGeometry.GetAt(0)
for j in range(0, sdfGeomSeg.Size - 1):
sdfPoint = sdfGeomSeg.GetAt(j)
ordinates[j*2] = sdfPoint.X
ordinates[j*2+1] = sdfPoint.Y
outerRing = geomFactory.CreateLinearRing(FDO_DIM_XY, ordinates.Length, ordinates)
# Process interior rings
for i in range(1, sdfGeometry.Size - 1):
ordinates = Array[str](range(sdfGeometry.TotalPoints * 2))
sdfGeomSeg = sdfGeometry.GetAt(i)
for j in range(0, sdfGeomSeg.Size - 1):
sdfPoint = sdfGeomSeg.GetAt(j)
ordinates[j*2] = sdfPoint.X
ordinates[j*2+1] = sdfPoint.Y
ring = geomFactory.CreateLinearRing(FDO_DIM_XY, ordinates.Length, ordinates)
innerRings.Add(linestr)
polygon = geomFactory.CreatePolygon(outerRing, innerRings)
fgf = geomFactory.GetFgf(fgf)
polygon.Dispose()
innerRings.Dispose()
return GeometryValue(fgf)
elif sdfGeometryType == sdfPolyPolygonObject:
# SDF Com TK documentation definitely fails me here! Just how do we know where a
# polygon ends and another one starts?
return None
else:
return None
"""
Performs the actual conversion
"""
def Convert(sdfFile, dstFile):
try:
gf = FgfGeometryFactory()
App.WriteLine("Attempting to obtain SDF COM tk interface")
sdfTk = CreateObject(SDF_TOOLKIT_PROGID)
if sdfTk is None:
raise Sdf2ConversionException("Could not obtain the SDF component toolkit interface. Please ensure that the MapGuide SDF Component Toolkit has been installed")
App.WriteLine("Opening for read: " + sdfFile)
sdfTk.Open(sdfFile, sdfOpenRead, False)
App.WriteLine("Opened: " + sdfFile)
if not dstFile is None:
if ExpressUtility.CreateFlatFileDataSource(dstFile, True):
App.WriteLine("Created: " + dstFile)
# all good. Let the conversion begin!
try:
total = sdfTk.TotalObjects
fdoConn = ExpressUtility.CreateFlatFileConnection(dstFile)
# create and apply SDF2 feature schema
className = Path.GetFileNameWithoutExtension(sdfFile)
schema = CreateSdf2Schema(className)
svc = fdoConn.CreateFeatureService(False)
try:
svc.ApplySchema(schema)
App.WriteLine("SDF2 Schema applied")
# create spatial context from CS metadata
csMeta = sdfTk.CoordinateSystemMetadata
if csMeta.CoordinateSystemCode == "LL84":
sc = CreateLL84SpatialContext()
svc.CreateSpatialContext(sc, True)
App.WriteLine("Spatial Context Created")
ProgressDialog.Show(total, False)
ProgressDialog.SetTitle("Converting SDF2 features")
ProgressDialog.SetMessage("Converted:")
count = 0
failed = 0
sdfTk.BeginSequentialSearch()
sdfObj = sdfTk.SearchToNextObject()
values = Dictionary[str,ValueExpression]()
while not sdfObj is None:
geomValue = ConvertToFgf(sdfObj.Geometry, sdfObj.Type)
if not geomValue is None:
values.Clear()
values["Name"] = StringValue(sdfObj.Name)
values["Key"] = StringValue(sdfObj.Key)
values["Url"] = StringValue(sdfObj.URL)
values["Geom"] = geomValue
svc.InsertFeatures(className, values, False)
count += 1
ProgressDialog.SetValue(count)
else:
failed += 1
sdfObj = sdfTk.SearchToNextObject()
finally:
ProgressDialog.Stop()
svc.Dispose()
App.ShowMessage("Convert", count.ToString() + " of " + total.ToString() + " features converted. " + failed.ToString() + " features failed to convert")
except Exception, ex:
raise Sdf2ConversionException("Error occurred during conversion", ex)
else:
raise Sdf2ConversionException("Unable to create data source: " + dstFile)
else:
raise Sdf2ConversionException("Target file not specified. Cannot continue")
finally:
sdfTk = None
gf.Dispose()
def Run():
sdfFile = App.GetFileForOpen("Open SDF File", "SDF 2.0 files (*.sdf)|*.sdf")
dstFile = App.GetFileForSave("Save As", "SDF 3.0 (*.sdf)|*.sdf|SQLite (*.db)|*.db")
if not String.IsNullOrEmpty(sdfFile):
if String.IsNullOrEmpty(dstFile):
App.ShowMessage("Error", "Target file not specified. Aborting")
else:
try:
Convert(sdfFile, dstFile)
except COMException, ex:
msg = GetComErrorMsg(ex.ErrorCode)
if String.IsNullOrEmpty(msg):
App.ShowMessage("Error", ex.ToString())
else:
App.ShowMessage("Error", "Error Code: " + msg)
except Sdf2ConversionException, ex:
App.ShowMessage("Error", ex.ToString())
Run() | Python |
#!/usr/bin/env python
# CGI script: emit a non-cacheable JavaScript snippet defining `timeskew`,
# the difference between the client's clock and this server's clock (ms).
import time
t = time.time()
u = time.gmtime(t)
# RFC 7231 HTTP-date format. BUG FIX: the original used '%e' and '%T',
# which are platform-specific strftime extensions (absent on Windows) and
# not the canonical HTTP date layout; '%d' and '%H:%M:%S' are portable.
s = time.strftime('%a, %d %b %Y %H:%M:%S GMT', u)
# Parenthesized single-argument prints: same output under Python 2.
print('Content-Type: text/javascript')
print('Cache-Control: no-cache')
print('Date: ' + s)
print('Expires: ' + s)
print('')
print('var timeskew = new Date().getTime() - ' + str(t*1000) + ';')
#!/usr/bin/python
# Trivial smoke-test script: prints a single greeting line to stdout.
print("hello world")
| Python |
#!/usr/bin/python
# Trivial smoke-test script: prints a single greeting line to stdout.
print("hello world")
| Python |
#!/usr/bin/python2.6
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build the ScriptCover Extension."""
__author__ = 'ekamenskaya@google.com (Ekaterina Kamenskaya)'
import logging
import optparse
import os
import shutil
import subprocess
import urllib
import zipfile
# Command used to fetch the Closure library sources via svn.
CHECKOUT_CLOSURE_COMMAND = ('svn checkout http://closure-library.googlecode.com'
                            '/svn/trunk/ closure-library')
# Download locations for the Closure compiler and the soy (Closure
# Templates) toolchain.
CLOSURE_COMPILER_URL = ('http://closure-compiler.googlecode.com/files/'
                        'compiler-latest.zip')
SOY_COMPILER_URL = ('http://closure-templates.googlecode.com/files/'
                    'closure-templates-for-javascript-latest.zip')
SOYDATA_URL = ('http://closure-templates.googlecode.com/svn/trunk/javascript/'
               'soydata.js')
# closurebuilder command template; '%s' receives the output file name and
# BuildClosureScript appends the '--input' flags.
COMPILE_CLOSURE_COMMAND = ('closure-library/closure/bin/build/closurebuilder.py'
                           ' --root=src'
                           ' --root=closure-library'
                           ' --root=build_gen'
                           ' --output_mode=compiled'
                           ' --output_file=%s'
                           ' --compiler_jar=compiler.jar')
# SoyToJsSrcCompiler command template with named substitutions
# (%(file)s input, %(output)s output path).
SOY_COMPILER_COMMAND = ('java -jar SoyToJsSrcCompiler.jar'
                        ' --shouldProvideRequireSoyNamespaces'
                        ' --outputPathFormat %(output)s'
                        ' %(file)s')
class ClosureError(Exception):
  """Raised when the Closure/soy toolchain cannot be set up or a build fails."""
  pass
def BuildClosureScript(input_filenames, output_filename):
  """Compile the given input scripts into one closure-compiled output file.

  Args:
    input_filenames: An iterable of input script paths to compile.
    output_filename: Path of the compiled output script.

  Raises:
    ClosureError: If compilation fails or produces no output file.
  """
  # Each input becomes an --input flag appended to the builder command.
  flags = ['--input=%s' % name for name in input_filenames]
  command = '%s %s' % (COMPILE_CLOSURE_COMMAND % output_filename,
                       ' '.join(flags))
  if ExecuteCommand(command) or not os.path.exists(output_filename):
    raise ClosureError('Failed while compiling to %s.' % output_filename)
def BuildSoyJs(input_file):
  """Compile a soy template into JavaScript under build_gen/.

  Args:
    input_file: Path to the soy file; the generated JavaScript is written to
        build_gen/{input_file}.js.

  Raises:
    ClosureError: If the soy compiler fails or produces no output file.
  """
  target = os.path.join('build_gen', '%s.js' % input_file)
  status = ExecuteCommand(SOY_COMPILER_COMMAND % {'file': input_file,
                                                  'output': target})
  if status or not os.path.exists(target):
    raise ClosureError('Failed while compiling the soy file %s.' % input_file)
def Clean():
  """Remove the build output directories.

  Deletes the 'build' and 'build_gen' directories when present. Safe to call
  when either directory is already absent.
  """
  # Bug fix: the original checked os.path.exists('clean') before removing
  # 'build', so 'build' was never actually cleaned (and an unguarded rmtree
  # would have raised). Check the directory we are about to remove.
  if os.path.exists('build'):
    shutil.rmtree('build')
  if os.path.exists('build_gen'):
    shutil.rmtree('build_gen')
def ExecuteCommand(command):
  """Run *command* in a subprocess and report its exit status.

  Args:
    command: A string holding the command line to run (split on spaces).

  Returns:
    The subprocess return code; non-zero indicates failure. Stderr is logged
    when the command fails.
  """
  print('Running command: %s' % command)
  proc = subprocess.Popen(command.split(' '),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
  _, stderr_output = proc.communicate()
  if proc.returncode:
    logging.error(stderr_output)
  return proc.returncode
def SetupClosure():
  """Set up the closure library, closure compiler and soy toolchain.

  Checks out the closure library via svn if it is missing, then downloads the
  closure compiler jar, the soy compiler jar plus its soyutils runtime, and
  soydata.js into the working/build_gen directories.

  Raises:
    ClosureError: If any required resource cannot be fetched.
  """
  # Set up the svn repo for closure if it doesn't exist.
  if not os.path.exists('closure-library'):
    ExecuteCommand(CHECKOUT_CLOSURE_COMMAND)
    if not os.path.exists('closure-library'):
      logging.error(('Could not check out the closure library from svn. '
                     'Please check out the closure library to the '
                     '"closure-library" directory.'))
      raise ClosureError('Could not set up the closure library.')
  # Download the compiler jar if it doesn't exist.
  if not os.path.exists('compiler.jar'):
    (compiler_zip, _) = urllib.urlretrieve(CLOSURE_COMPILER_URL)
    compiler_zipfile = zipfile.ZipFile(compiler_zip)
    compiler_zipfile.extract('compiler.jar')
    if not os.path.exists('compiler.jar'):
      logging.error('Could not download the closure compiler jar.')
      raise ClosureError('Could not find the closure compiler.')
  # Download the soy compiler jar (and its goog-aware soyutils) if missing.
  if (not os.path.exists('SoyToJsSrcCompiler.jar') or
      not os.path.exists(os.path.join('build_gen', 'soyutils_usegoog.js'))):
    (soy_compiler_zip, _) = urllib.urlretrieve(SOY_COMPILER_URL)
    soy_compiler_zipfile = zipfile.ZipFile(soy_compiler_zip)
    soy_compiler_zipfile.extract('SoyToJsSrcCompiler.jar')
    soy_compiler_zipfile.extract('soyutils_usegoog.js', 'build_gen')
    if (not os.path.exists('SoyToJsSrcCompiler.jar') or
        not os.path.exists(os.path.join('build_gen', 'soyutils_usegoog.js'))):
      logging.error('Could not download the soy compiler jar.')
      raise ClosureError('Could not find the soy compiler.')
  # Download soydata.js, which soyutils_usegoog.js requires at runtime.
  if not os.path.exists(os.path.join('build_gen', 'soydata.js')):
    urllib.urlretrieve(SOYDATA_URL, os.path.join('build_gen', 'soydata.js'))
    if not os.path.exists(os.path.join('build_gen', 'soydata.js')):
      logging.error('Could not download soydata.js.')
      # Bug fix: the original message read 'Could not fine soydata.js'.
      raise ClosureError('Could not find soydata.js')
def main():
  """Build the extension: fetch Closure deps, compile templates/JS, copy assets."""
  parser = optparse.OptionParser('usage: %prog [options]')
  parser.add_option('--clean', dest='build_clean',
                    action='store_true', default=False,
                    help='Clean the build directories.')
  (options, _) = parser.parse_args()
  if options.build_clean:
    Clean()
    exit()
  # Make sure the output directories exist.
  for out_dir in ('build', 'build_gen'):
    if not os.path.exists(out_dir):
      os.mkdir(out_dir)
  # Get external Closure resources.
  SetupClosure()
  # Compile the soy templates, then the closure scripts.
  for template in ('coverage_report.soy', 'popup.soy'):
    BuildSoyJs(os.path.join('src', template))
  targets = [
      ('content_compiled.js',
       ['scriptLoader.js', 'instrumentation.js', 'startTool.js']),
      ('background_compiled.js',
       ['showCoverageHelper.js', 'background.js']),
      ('inject_compiled.js', ['backgroundInteraction.js']),
      ('popup_compiled.js', ['popup.js', 'background.js']),
  ]
  for output_name, source_names in targets:
    BuildClosureScript((os.path.join('src', name) for name in source_names),
                       os.path.join('build', output_name))
  # Refresh the static resource directories.
  for resource_dir in ('styles', 'third_party'):
    dest_dir = os.path.join('build', resource_dir)
    if os.path.exists(dest_dir):
      shutil.rmtree(dest_dir)
    shutil.copytree(os.path.join('src', resource_dir), dest_dir)
  # Copy over the remaining static files.
  for basename in ('background.html', 'popup.html', 'manifest.json',
                   'brticon.png'):
    shutil.copy(os.path.join('src', basename), 'build')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import gtk
import webbrowser
from pagewindow import *
def on_click(self, widget, event):
    """Button-press handler for the bar window (bound as a Bar method).

    Left click on a headline opens it in a PageWindow, middle click opens it
    in the default browser; clicking the inline feed logo does the same for
    the feed's home link. A triple click rewinds the ticker scroll.
    """
    if self.pointer_on_rest == True:
        # Which button was clicked?
        self.pointer = self.window.get_pointer()
        for idx in range(self.feedrange[self.feedseq]):
            # Hit-test the pointer's x coordinate against each headline span.
            if self.location[idx] < self.pointer[0] < (self.location[idx] + self.width[idx]):
                if event.button == 1:
                    print idx
                    PageWindow(self.feedcontent[self.feedseq].entries[idx].link)
                    break
                elif event.button == 2:
                    webbrowser.open(self.feedcontent[self.feedseq].entries[idx].link)
                    break
                elif event.button == 3:
                    pass
            # ...then against the small feed logo drawn between headlines.
            elif self.inner_logo_location[idx] < self.pointer[0] < (self.inner_logo_location[idx] + self.inner_logo_width):
                if event.button == 1:
                    print idx
                    PageWindow(self.feedcontent[self.feedseq].feed.link)
                    break
                elif event.button == 2:
                    webbrowser.open(self.feedcontent[self.feedseq].feed.link)
                    break
                elif event.button == 3:
                    pass
#        webbrowser.open(self.feedcontent[self.feedseq].entries[idx].link)
    if event.type == gtk.gdk.BUTTON_PRESS:
        pass
    elif event.type == gtk.gdk._2BUTTON_PRESS:
        pass
    elif event.type == gtk.gdk._3BUTTON_PRESS:
        # Triple click: rewind the ticker to its start edge and reset speed.
        if not self.feeds[self.feedseq]['RtL']: # direction of movement
            self.xLocation = self.w + 100
        elif self.feeds[self.feedseq]['RtL']: # direction of movement
            self.xLocation = 0
        self.movespeed = 3
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import ConfigParser
DIR_USER = os.environ['HOME'] + '/.feedbar'
def getfeeds():
    """Read feeds.cfg and return {section_number: feed settings dict}.

    Only sections whose 'feed_type' is 'Feed' are included; each entry carries
    the type, title, link, maximum item count and the RtL/pango flags.
    """
    parser = ConfigParser.RawConfigParser()
    parser.read(DIR_USER + '/feeds.cfg')
    feeds = {}
    for section in parser.sections():
        if parser.get(section, 'feed_type') != 'Feed':
            continue
        feeds[int(section)] = {
            'feed_type': parser.get(section, 'feed_type'),
            'feed_title': parser.get(section, 'feed_title'),
            'feed_link': parser.get(section, 'feed_link'),
            'feedmax': parser.getint(section, 'feedmax'),
            'RtL': parser.getboolean(section, 'RtL'),
            'pango': parser.getboolean(section, 'pango'),
        }
    return feeds
| Python |
#!/usr/bin/python
import sys
import gobject
from feedbar import feedbar
#if __name__ == "__main__":
# NOTE(review): the main-guard above is commented out, so the bar starts as a
# side effect of importing this module as well as when run directly.
feedbar.start_bar()
| Python |
#!/usr/bin/python
import sys
import gobject
from mainbar import *
# Application identity and filesystem locations.
APP_NAME = ('Feedbar')
APP_VERSION = '0.01'
# Per-user configuration directory and scratch space.
DIR_USER = os.environ['HOME'] + '/.feedbar'
DIR_TMP = '/tmp/feedbar/'
def create_user_dir():
    """Create the per-user config directory tree and empty config files.

    Ensures DIR_USER plus its images/ and tmp/ subdirectories exist, then
    creates empty preferences.cfg and feeds.cfg files when missing.
    """
    for path in [DIR_USER, DIR_USER + "/images", DIR_USER + "/tmp"]:
        if not os.path.isdir(path):
            os.mkdir(path)
    # Bug fix: the original tested for feeds.cfg before creating
    # preferences.cfg (copy-paste error), so preferences.cfg was skipped
    # whenever feeds.cfg already existed. Also close the handles explicitly
    # instead of leaking them.
    if not os.path.isfile(DIR_USER + "/preferences.cfg"):
        open(DIR_USER + "/preferences.cfg", 'w').close()
    if not os.path.isfile(DIR_USER + "/feeds.cfg"):
        open(DIR_USER + "/feeds.cfg", 'w').close()
def start_bar ():
    """Create the user config directory, build the bar and run the reactor loop."""
    create_user_dir()
    bar = Bar()
    reactor.run()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from twisted.internet import reactor
from twisted.web.client import getPage
import cairo
import os
import gtk
#temporary, remove later
# User config dir, destination for scaled logos, and download scratch space.
DIR_USER = os.environ['HOME'] + '/.feedbar'
DIR_IMG = os.environ['HOME'] + '/.feedbar/images/'
DIR_TMP = '/tmp/feedbar/'
# Make sure the scratch directory exists before any download lands in it.
if not os.path.isdir(DIR_TMP):
    os.mkdir(DIR_TMP)
def getlogo(url, height):
    """Asynchronously fetch a feed logo from *url* and scale it for the bar.

    The twisted download deferred hands the bytes to saveImg on success and
    to errorHandler on failure.
    """
    name = os.path.basename(url)
    deferred = getPage(url)
    deferred.addCallback(saveImg, name, height)
    deferred.addErrback(errorHandler)
def saveImg(data, name, height):
    """Write downloaded image bytes to disk, converting to PNG when needed.

    Args:
        data: Raw image bytes from the download.
        name: Basename of the downloaded file.
        height: Target bar height passed through to scaleImg.
    """
    # Local renamed from 'tempfile' to avoid shadowing the stdlib module name.
    temp_path = DIR_TMP + name
    basename, extension = os.path.splitext(name)
    # Close the handle explicitly -- the original left it to GC.
    out = open(temp_path, 'wb')
    try:
        out.write(data)
    finally:
        out.close()
    if not extension == ".png":
        # Re-encode non-PNG images through a pixbuf so cairo can load them.
        pixbuf = gtk.gdk.pixbuf_new_from_file(temp_path)
        os.remove(temp_path)
        temp_path = DIR_TMP + basename + ".png"
        name = basename + ".png"
        pixbuf.save(temp_path, "png")
    scaleImg(temp_path, name, height)
def scaleImg(tempfile, name, height):
    """Scale the PNG at *tempfile* to 80% of *height* and store it in DIR_IMG.

    The temporary source file is removed once the scaled copy is written.
    """
    src = cairo.ImageSurface.create_from_png(tempfile)
    src_h = src.get_height()
    src_w = src.get_width()
    target_h = height * 0.8
    factor = target_h / src_h
    target_w = src_w * factor
    dst = cairo.ImageSurface(src.get_format(), int(target_w), int(target_h))
    ctx = cairo.Context(dst)
    ctx.scale(factor, factor)
    ctx.set_source_surface(src, 0, 0)
    ctx.paint()
    dst.write_to_png(DIR_IMG + name)
    os.remove(tempfile)
def errorHandler(failure):
    """Errback for the logo download; just reports that something went wrong."""
    print("problem")
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#TODO
#fix atom summaries
import os, sys
import gtk
import glib
import cairo
import time
from twisted.internet import gtk2reactor
gtk2reactor.install()
from twisted.internet import reactor
from twisted.web.client import getPage
import html2text
import logging, logging.handlers
from django.utils.translation import ugettext_lazy as _
#clean these later
from RSS import *
from properties import *
from fbevents import *
from feedsparse import *
#from getlogo import *
#temporary
import webbrowser
# Data directories: per-user config plus system-wide install locations.
DIR_USER = os.environ['HOME'] + '/.feedbar'
DIR_USER1 = '/usr/share/feedbar'
DIR_USER2 = '/usr/local/share/feedbar'
class Bar:
    """Screen-wide feed ticker window plus its tray status icon and callbacks.

    The bar is a dock-type GTK window along the bottom edge of the screen; a
    glib timer redraws it continuously while another timer re-downloads the
    configured feeds.
    """
    def __init__(self):
        """Build the window, wire up events, download feeds and start timers."""
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_title("Feedbar")
        self.window.resize(gtk.gdk.screen_width(), 30)
        self.window.set_position(gtk.WIN_POS_CENTER)
        self.window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DOCK)
        self.window.move(0, gtk.gdk.screen_height()-55)
        self.window.show()
        # Reserve 55px of screen space at the bottom edge via the WM strut.
        self.window.window.property_change("_NET_WM_STRUT", "CARDINAL", 32,
            gtk.gdk.PROP_MODE_REPLACE, [0, 0, 0, 55])
        self.bar()
        # show gtk icons
        self.settings = gtk.settings_get_default()
        self.settings.set_property("gtk-button-images", True)
        self.window.connect("destroy", gtk.main_quit)
#        self.window.set_tooltip_text("The current feed from {0}".format(self.feedtitle))
        self.window.connect("button-press-event", self._on_button_press_event)
        self.window.connect("scroll-event", self.scroll_event)
        self.window.connect("enter-notify-event", self.enter_notify)
        self.window.connect("leave-notify-event", self.leave_notify)
        self.window.props.has_tooltip = True
        self.window.connect("query-tooltip", self.query_tooltip_cb)
        self.window.add_events(gtk.gdk.BUTTON_PRESS_MASK)
        self.task_done = False
        self.collapsed = False
        self.formatting()
        # Maps feed_type -> (draw function, download function).
        self.optionsavail = {'Feed': (RSSdraw, RSSdownload)}
        self.feeds = getfeeds()
        for seq, info in enumerate(self.feeds.values()):
#            (self.type[seq], feed_title, feed_link, self.feedmax[seq], self.RtL[seq], self.pango_enabled[seq]) = info
            feed_type = self.feeds[seq]['feed_type']
            # Kick off the initial download for every configured feed.
            self.optionsavail[feed_type][1](self, self.feeds[seq]['feed_link'], seq)
#            glib.timeout_add_seconds(120, self.optionsavail[self.type[seq]][1], self, feed_link, seq)
            #test <<<<>>>>>>
            self.draw[seq] = self.optionsavail[feed_type][0]
        glib.timeout_add_seconds(300, self.download_timer)
        # Start position depends on the feed's scroll direction.
        if not self.feeds[self.feedseq]['RtL']:
            self.xLocation = self.w + 100
        elif self.feeds[self.feedseq]['RtL']:
            self.xLocation = 0
        self.darea = gtk.DrawingArea()
        self.darea.connect("expose-event", self.expose, self.draw[self.feedseq])
        self.window.add(self.darea)
        glib.timeout_add(30, self.on_timer)
        self.window.show_all()
    def bar(self):
        """Build the tray status icon and its right-click menu."""
        self.statusIcon = gtk.StatusIcon()
        self.statusIcon.set_from_file(DIR_USER1 + "/images/feedbar.png")
        self.statusIcon.set_visible(True)
        self.statusIcon.set_tooltip("Feedbar")
        self.menu = gtk.Menu()
        self.menuItem = gtk.ImageMenuItem(gtk.STOCK_PROPERTIES)
        self.menuItem.connect('activate', self.execute_cb, self.statusIcon)
        self.menu.append(self.menuItem)
        self.menuItem = gtk.ImageMenuItem(stock_id='Official Site')
        self.menuItem.connect('activate', self.opensite_cb, self.statusIcon)
        self.menu.append(self.menuItem)
        separator = gtk.MenuItem()
        self.menu.append(separator)
        self.menuItem = gtk.ImageMenuItem(gtk.STOCK_QUIT)
        self.menuItem.connect('activate', self.quit_cb, self.statusIcon)
        self.menu.append(self.menuItem)
        self.statusIcon.connect('popup-menu', self.popup_menu_cb, self.menu)
        self.statusIcon.set_visible(1)
        self.statusIcon.connect("button-press-event", self.collapse_cb)
    def on_timer(self):
        """Redraw tick: queue a repaint unless collapsed; False stops the timer."""
        if not self.timer: return False
#        if not self.movespeed == 0:
        if not self.collapsed:
            self.darea.queue_draw()
        return True
    def download_timer(self):
        """Periodic re-download of every configured feed; True keeps the timer."""
        for seq, info in enumerate(self.feeds.values()):
            feed_type = self.feeds[seq]['feed_type']
            self.optionsavail[feed_type][1](self, self.feeds[seq]['feed_link'], seq)
        return True
    def formatting(self):
        """Initialise fonts, timers and the per-feed layout caches."""
        self.feedseq = 0
        self.font = "MgOpen Modata"
        self.timer = True
        self.movespeed = 3
        self.initial_moves = False
        self.width = {}
        self.height = {}
        self.height_diff = {}
        self.location = {}
        self.inner_logo_location = {}
        self.out = {}
        self.feedcontent = {}
        self.logo = {}
        self.draw = {}
        self.feedrange = {}
        self.layout = {}
        self.h = self.window.allocation.height
        self.w = self.window.allocation.width
        self.pointer_on_bar = False
        self.pointer_on_title = True
        self.pointer_on_rest = False
    def expose(self, widget, event, feed_type):
        """Paint the bar: background gradient, feed content, title plate, logo.

        Also advances the scroll position each frame and switches to the next
        feed once the current one has scrolled off screen.
        """
        cr = widget.window.cairo_create()
        # main bar
        if not self.feeds[self.feedseq]['RtL']: # direction of movement
            self.xLocation = self.xLocation - self.movespeed
        elif self.feeds[self.feedseq]['RtL']:
            self.xLocation = self.xLocation + self.movespeed
        crlinear = cairo.LinearGradient(0.0, 0.0, 0.0, 30)
#        crlinear.add_color_stop_rgba (0.3, 0.998, 0.294, 0.176, 1)
#        crlinear.add_color_stop_rgba (1, 0.776470588, 0.062745098, 0.039215686, 1)
        crlinear.add_color_stop_rgba (0.6, 0.247, 0.470588235, 0.662745098, 1)
        crlinear.add_color_stop_rgba (1, 0.184313725, 0.345098039, 0.50, 1)
        cr.set_source(crlinear)
        cr.rectangle((self.w/7) - ((self.w/7)/13), 0, self.w, self.h)
        cr.fill()
        cr.select_font_face(self.font, cairo.FONT_SLANT_NORMAL)
        cr.set_source_rgb(1, 1, 1)
        #Draw here
        feed_type(self, self.window, cr)
        # title part
#        cr.rectangle (0,0,(self.w/7),self.h)
        cr.set_source_rgba(0.9, 0.9, 0.9, 1)
        cr.move_to(0, self.h)
        cr.line_to(0, 0)
        cr.line_to((self.w/7) - ((self.w/7)/13), 0)
        cr.curve_to((self.w/7), 5, (self.w/7), 25, (self.w/7), 30)
        cr.close_path()
        cr.fill()
        crlinear = cairo.LinearGradient((self.w/7), 0.0, (self.w/7), 30)
        crlinear.add_color_stop_rgba (0.6, 0.84, 0.84, 0.84, 1)
        crlinear.add_color_stop_rgba (1, 0.72, 0.72, 0.72, 1)
        cr.set_source(crlinear)
        cr.move_to(30, self.h)
        cr.curve_to(80, 20, 80, 10, (self.w/7) - ((self.w/7)/13), 0)
        cr.curve_to((self.w/7), 5, (self.w/7), 25, (self.w/7), 30)
#        cr.line_to((self.w/7), self.h)
        cr.close_path()
        cr.fill()
#        cr.save()
        # drawing image (comsumes a bit more cpu)
        # The title logo surface and its scale factors are cached on self
        # after the first frame (AttributeError path below).
        try:
            surface = self.surface
            logo_height = self.logo_height
            logo_width = self.logo_width
            measure = self.measure
            logo_opt_h = self.logo_opt_h
        except AttributeError:
            surface = cairo.ImageSurface.create_from_png(DIR_USER1 + "/images/title_logo.png")
            logo_height = float(surface.get_height())
            logo_width = float(surface.get_width())
            new_height = self.h * 0.8
            measure = new_height / logo_height
            logo_opt_h = (self.h/2 - ((logo_height)*measure)/2)/measure
            self.surface = surface
            self.logo_height = logo_height
            self.logo_width = logo_width
            self.measure = measure
            self.logo_opt_h = logo_opt_h
#            print "image from png"
        cr.scale(measure, measure)
        cr.rectangle (20/measure, logo_opt_h, logo_width, logo_height)
        cr.set_source_surface(surface, 20/measure, logo_opt_h)
        cr.fill()
#        cr.restore()
#        cr.set_source_rgba(0.776470588, 0.062745098, 0.039215686, 1)
#        cr.select_font_face("Sans", cairo.FONT_SLANT_NORMAL)
#        cr.set_font_size(17)
#        cr.move_to(50, self.h/1.4)
#        cr.show_text("Latest News")
        try:
            # moving to next feed
            if not self.feeds[self.feedseq]['RtL']:
                m_condition = self.location[self.feedrange[self.feedseq]-1] <= self.stoppoint + 100
            elif self.feeds[self.feedseq]['RtL']:
                m_condition = self.location[len(self.feedcontent[self.feedseq]['entries'])-1] >= self.w + 100
            if m_condition:
                self.nextfeedseq = self.feedseq + 1
                if not self.nextfeedseq in self.feeds:
                    self.feedseq = 0
                else:
                    self.feedseq = self.nextfeedseq
                parse(self, self.out[self.feedseq], self.feedseq)
                # clearing cache
                del self.font_size
                del self.inner_surface
                if not self.feeds[self.feedseq]['pango']:
                    self.width = {}
                if self.feeds[self.feedseq]['pango']:
                    self.layout = {}
#                for idx in range(len(self.feedcontent[self.feedseq]['entries'])):
#                    measurewidth(self, cr, idx)
                if not self.feeds[self.feedseq]['RtL']: # direction of movement
                    self.xLocation = self.w + 100
                elif self.feeds[self.feedseq]['RtL']: # direction of movement
                    self.xLocation = 0
#                print "next feed" , self.feedseq
        except KeyError:
            # Feed content not parsed yet; restart the scroll from the edge.
#            print "Loading, current sequence", self.feedseq, "While moving to next feed"
            if not self.feeds[self.feedseq]['RtL']:
                self.xLocation = self.w + 100
            elif self.feeds[self.feedseq]['RtL']:
                self.xLocation = 0
#    for l in range(len(self.feedlinks)):
#        feedname = self.feedlinks.keys()[l]
#        vars()[feedname] = feed(self.feedlinks.values()[l])
    def _on_button_press_event(self, widget, event):
        """Delegate mouse clicks to the on_click handler from fbevents."""
        on_click(self, widget, event)
    def scroll_event(self, button, event):
        """Scrolling over the title area speeds up / slows down the ticker."""
        if event.direction == gtk.gdk.SCROLL_UP: direction=1
        else: direction=-1
        if self.pointer_on_title == True:
            if direction == 1:
                self.movespeed = self.movespeed + 0.5
            elif direction == -1:
                self.movespeed = self.movespeed - 0.5
    def enter_notify(self, widget, event):
        """Track where the pointer entered; pause scrolling over the headlines."""
        self.pointer = self.window.get_pointer()
#        print "Enter bar", self.pointer
        self.pointer_on_bar = True
        if (0 <= self.pointer[0] <= (self.w/7)) and (0 <= self.pointer[1] <= self.h):
#            print "On Title"
            self.pointer_on_title = True
            self.pointer_on_rest = False
        elif self.pointer[0] >= (self.w/7) and (0 <= self.pointer[1] <= self.h):
#            print "On Rest"
            self.pointer_on_rest = True
            self.pointer_on_title = False
            if not self.movespeed == 0:
                # Remember the speed so leave_notify can restore it.
                self.movespeedbef = self.movespeed
                self.movespeed = 0
    #problem
    def leave_notify(self, widget, event):
        """Resume scrolling when the pointer actually leaves the window."""
        self.pointer = self.window.get_pointer()
#        print self.pointer
        #TEMP SOL
        if not (0 < self.pointer[0] < self.w) or not (0 < self.pointer[1] < self.h):
            try:
                self.movespeed = self.movespeedbef
                del self.movespeedbef
            except AttributeError:
                pass
#            print "leave"
            self.pointer_on_bar = False
            self.pointer_on_rest = False
            self.pointer_on_title = False
    def query_tooltip_cb(self, widget, x, y, keyboard_tip, tooltip):
        """Show an entry summary (or the feed title) as the window tooltip."""
        if self.pointer_on_rest == True:
            try:
                for idx in range(self.feedrange[self.feedseq]):
                    if self.location[idx] < x < (self.location[idx] + self.width[idx]):
                        tip = html2text.html2text(self.feedcontent[self.feedseq].entries[idx].summary)
                        self.window.set_tooltip_text(tip)
                        break
                    elif self.inner_logo_location[idx] < x < (self.inner_logo_location[idx] + self.inner_logo_width) and not idx == (self.feedrange[self.feedseq]):
                        self.window.set_tooltip_text(self.feedcontent[self.feedseq].feed.title)
                        break
            except KeyError:
                #careful here
                pass
        if self.pointer_on_title == True:
            self.window.set_tooltip_text("Statistics here")
    #imagemenu functions
    def execute_cb(self, widget, event):
        """Status-icon menu: open the preferences dialog."""
        Properties().main()
    def opensite_cb(self, widget, event):
        """Status-icon menu: open the project web site in a browser."""
        webbrowser.open('http://www.feedbar.org')
    def quit_cb(self, widget, data = None):
        """Status-icon menu: quit the application."""
        gtk.main_quit()
        exit()
    def popup_menu_cb(self, widget, button, time, data = None):
        """Show the status-icon context menu on right click."""
        if button == 3:
            if data:
                data.show_all()
                data.popup(None, None, gtk.status_icon_position_menu,
                    3, time, self.statusIcon)
    def collapse_cb(self, widget, event):
        """Left click on the status icon toggles hiding/showing the bar."""
        if event.button == 1:
            if not self.collapsed:
                self.collapsed = True
                self.window.hide()
            elif self.collapsed:
                self.collapsed = False
                self.window.set_position(gtk.WIN_POS_CENTER)
                self.window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_DOCK)
                self.window.move(0, gtk.gdk.screen_height()-55)
                self.window.show()
    def main(self):
        """Enter the GTK main loop."""
        gtk.main()
if __name__ == "__main__":
    # Stand-alone entry: build the bar and run the twisted/gtk reactor.
    bar = Bar()
    reactor.run()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import gtk
import webkit
import webbrowser
class PageWindow:
    """Undecorated popup window that previews a feed entry in embedded WebKit.

    Offers Close and "Open In Browser" buttons above the rendered page.
    """
    def __init__(self, link):
        """Build the popup near the bottom-centre of the screen and load *link*."""
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_decorated(False)
#        self.window.set_type_hint(gtk.gdk.WINDOW_TYPE_HINT_TOOLTIP)
        self.window.resize(700, 300)
        self.window.move((gtk.gdk.screen_width()/2)-(700/2), (gtk.gdk.screen_height())-(300)-56)
        self.vbox = gtk.VBox()
        self.hbox = gtk.HBox()
        # NOTE(review): the first assignment is immediately overwritten by the
        # stock Close button on the next line; it looks redundant.
        button1 =gtk.Button()
        button1 = gtk.Button(stock=gtk.STOCK_CLOSE)
        button1.connect("clicked", self.destroy_every, None)
        button1.connect("clicked", lambda w: self.window.destroy())
        self.hbox.pack_start(button1, expand = False)
        self.link = link
        button2 =gtk.Button("Open In Browser")
        image = gtk.Image()
        image.set_from_stock(gtk.STOCK_OPEN, gtk.ICON_SIZE_BUTTON)
        button2.set_image(image)
        button2.connect("clicked", self.open_browser, None)
        self.hbox.pack_start(button2)
        self.vbox.pack_start(self.hbox, expand = False)
        self.hbox2 = gtk.HBox()
        self.vscale = gtk.VScale()
        self.hbox2.pack_start(self.vscale, expand = False)
        self.view = webkit.WebView()
        sw = gtk.ScrolledWindow()
        sw.set_border_width(5)
        sw.add(self.view)
        self.hbox2.pack_end(sw, expand = True)
        self.vbox.pack_end(self.hbox2)
        self.window.add(self.vbox)
        self.window.set_border_width(5)
        self.window.show_all()
        self.view.open(link)
        self.view.set_zoom_level(1)
#    def main(self):
#        gtk.main()
    def open_browser(self, widget=None, data=None):
        """Open the previewed link in the system default browser."""
        webbrowser.open(self.link)
    def destroy_every(self, widget=None, data=None):
        """Tear down the WebKit view explicitly before the window closes."""
        self.view.destroy()
        del self.view
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os, sys
import gtk
import glib
from django.utils.translation import ugettext_lazy as _
class Properties:
    """Feedbar preferences dialog (General / Looks / Feeds / Customize / About).

    The widgets are built statically here; they are not yet wired to the
    configuration files -- TODO confirm against the rest of the project.
    """
    def __init__(self):
        """Build the whole options window and show it."""
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_title("FeedBar Options")
        self.window.set_icon_from_file('/usr/share/pixmaps/feedbar.png')
#        self.window.set_resizable(False)
        self.window.resize(500, 350)
        self.window.move((gtk.gdk.screen_width()/2)-(500/2), (gtk.gdk.screen_height()/2)-(350/2))
#        e = gtk.EventBox()
        screen = self.window.get_screen()
        rgba = screen.get_rgba_colormap()
        self.window.set_colormap(rgba)
        self.settings = gtk.settings_get_default()
        self.settings.set_property("gtk-button-images", True)
        # Bottom Apply/Close button row.
        self.hbuttonbox = gtk.HButtonBox()
        self.hbuttonbox.set_layout(gtk.BUTTONBOX_SPREAD)
        button = gtk.Button(stock=gtk.STOCK_APPLY)
        self.hbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_CLOSE)
        self.hbuttonbox.pack_end(button)
        # --- General tab ---
        self.generalFrame = gtk.Frame()
        hbox1 = gtk.HBox()
        label = gtk.Label("Move Speed")
        label.set_alignment(xalign=0, yalign=0.5)
        label.set_tooltip_text("The speed of the moving text")
        hbox1.pack_start(label)
        hscale = gtk.HScale()
        hscale.set_tooltip_text("Pixels per Second")
        hscale.set_value_pos(gtk.POS_RIGHT)
        hbox1.pack_end(hscale)
        hbox2 = gtk.HBox()
        label = gtk.Label("Fetching Interval")
        label.set_alignment(xalign=0, yalign=0.5)
        label.set_tooltip_text("The time until the feeds are updated again")
        hbox2.pack_start(label)
        spinbutton = gtk.SpinButton()
        hbox2.pack_start(spinbutton, padding = 5)
        combobox = gtk.combo_box_new_text()
        combobox.append_text("Seconds")
        combobox.append_text("Minutes")
        combobox.append_text("Hours")
        combobox.append_text("Days")
        combobox.set_active(1)
        hbuttonbox = gtk.HButtonBox()
        hbuttonbox.pack_start(combobox)
        hbuttonbox.set_layout(gtk.BUTTONBOX_END)
        hbox2.pack_start(hbuttonbox, expand = False)
        hbox3 = gtk.HBox()
        label = gtk.Label("Position")
        label.set_alignment(xalign=0, yalign=0.5)
        hbox3.pack_start(label)
        combobox = gtk.combo_box_new_text()
        combobox.append_text("Top")
        combobox.append_text("Bottom")
        combobox.set_active(1)
        hbuttonbox = gtk.HButtonBox()
        hbuttonbox.pack_start(combobox)
        hbuttonbox.set_layout(gtk.BUTTONBOX_END)
        hbox3.pack_start(hbuttonbox, padding = 5)
        spinbutton = gtk.SpinButton()
#        spinbutton.set_layout(gtk.BUTTONBOX_END)
        hbox3.pack_start(spinbutton, expand = False)
        hbox3.set_homogeneous(True)
        hbox4 = gtk.HBox()
        vbox = gtk.VBox()
        check_button = gtk.CheckButton(label="Always on Top")
        vbox.pack_start(check_button)
        check_button = gtk.CheckButton(label="Reserve Window Space")
        vbox.pack_start(check_button)
        hbox4.pack_start(vbox)
        vbox2 = gtk.VBox()
        check_button = gtk.CheckButton(label="Run on Startup")
        vbox2.pack_start(check_button)
        check_button = gtk.CheckButton(label="Something Here")
        vbox2.pack_start(check_button)
        hbox4.pack_start(vbox2)
        generalvbox = gtk.VBox()
        generalvbox.set_border_width(25)
        generalvbox.pack_start(hbox1)
        generalvbox.pack_start(hbox2)
        generalvbox.pack_start(hbox3)
        generalvbox.pack_start(hbox4)
        self.generalFrame.add(generalvbox)
        # --- Feeds tab: feed list plus reorder/add/remove controls ---
        self.feedsFrame = gtk.Frame()
        vbox = gtk.VBox()
        hbox = gtk.HBox()
        scrolledwindow = gtk.ScrolledWindow()
        self.treeview = gtk.TreeView()
        cell = gtk.CellRendererText()
        self.tvcolumn = gtk.TreeViewColumn("Test 1st", cell)
        self.treeview.append_column(self.tvcolumn)
        scrolledwindow.add_with_viewport(self.treeview)
        scrolledwindow.set_property("hscrollbar-policy", gtk.POLICY_AUTOMATIC)
        scrolledwindow.set_property("vscrollbar-policy", gtk.POLICY_AUTOMATIC)
        hbox.pack_start(scrolledwindow)
        vbuttonbox = gtk.VButtonBox()
        button = gtk.Button(stock=gtk.STOCK_GOTO_TOP)
        vbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_GO_UP)
        vbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_ADD)
        vbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_REMOVE)
        vbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_GO_DOWN)
        vbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_GOTO_BOTTOM)
        vbuttonbox.pack_start(button)
        vbuttonbox.set_layout(gtk.BUTTONBOX_CENTER)
        hbox.pack_end(vbuttonbox, False)
        hbox.set_homogeneous(False)
        vbox.pack_start(hbox)
        hbuttonbox = gtk.HButtonBox()
        button = gtk.Button(stock=gtk.STOCK_YES)
        hbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_YES)
        hbuttonbox.pack_start(button)
        button = gtk.Button(stock=gtk.STOCK_YES)
        hbuttonbox.pack_start(button)
        vbox.pack_end(hbuttonbox, False, padding = 5)
        self.feedsFrame.add(vbox)
        # --- About tab placeholder (also reused for Looks/Customize below) ---
        self.aboutFrame = gtk.Frame()
        self.Label2 = gtk.Label("Hi")
        self.Label2.set_line_wrap(True)
        self.aboutFrame.add(self.Label2)
        self.notebook = gtk.Notebook()
        self.notebook.append_page(self.generalFrame, gtk.Label("General"))
        self.notebook.child_set_property(self.generalFrame, "tab-expand", True)
        self.notebook.append_page(self.aboutFrame, gtk.Label("Looks"))
        self.notebook.append_page(self.feedsFrame, gtk.Label("Feeds"))
        self.notebook.append_page(self.aboutFrame, gtk.Label("Customize"))
        self.notebook.append_page(self.aboutFrame, gtk.Label("About"))
        self.notebook.child_set_property(self.aboutFrame, "tab-expand", True)
        self.notebook.set_property('homogeneous', True)
        self.vbox = gtk.VBox()
        self.vbox.pack_start(self.notebook)
        self.vbox.pack_start(self.hbuttonbox, False, padding = 20)
        self.vbox.set_homogeneous(False)
        self.window.add(self.vbox)
        self.window.show_all()
    def main(self):
        """Enter the GTK main loop."""
        gtk.main()
if __name__ == "__main__":
    # Stand-alone entry for testing the preferences dialog on its own.
    properties = Properties()
    properties.main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from twisted.web.client import getPage
import feedparser
import cairo
import pango
import os
import glib
from getlogo import *
DIR_IMG = os.environ['HOME'] + '/.feedbar/images/'
def RSSdraw(self, window_to_draw, cr):
self.window = window_to_draw
# detecting pointer
if self.pointer_on_bar == True:
# print "checking"
self.pointer = self.window.get_pointer()
if (0 <= self.pointer[0] <= (self.w/7)) and (0 <= self.pointer[1] <= self.h):
# print "On Title"
self.pointer_on_title = True
self.pointer_on_rest = False
#test
self.tip = "Title"
elif self.pointer[0] >= (self.w/7) and (0 <= self.pointer[1] <= self.h):
# print "On Rest"
self.pointer_on_rest = True
self.pointer_on_title = False
#test
self.tip = "Rest"
if self.pointer_on_title == True and self.movespeed == 0:
try:
self.movespeed = self.movespeedbef
del self.movespeedbef
except AttributeError:
pass
if self.pointer_on_rest == True and self.movespeed != 0:
self.movespeedbef = self.movespeed
self.movespeed = 0
try:
# determining best font size
if not self.feeds[self.feedseq]['pango']:
try:
font_size = self.font_size
except AttributeError:
font_size = self.h
cr.set_font_size(font_size)
heights = {}
for idx in range(self.feedrange[self.feedseq]):
(x, y, width, height, dx, dy) = cr.text_extents(self.feedcontent[self.feedseq].entries[idx].title)
heights[idx] = height
idx_of_max_height = max((v,k) for (k,v) in heights.iteritems())[1]
while height > (self.h * 0.64):
font_size -= 1
cr.set_font_size(font_size)
(x, y, width, height, dx, dy) = cr.text_extents(self.feedcontent[self.feedseq].entries[idx_of_max_height].title)
# print height, idx_of_max_height, font_size
self.font_size = font_size
elif self.feeds[self.feedseq]['pango']:
try:
font_size = self.font_size
except AttributeError:
font_size = self.h
layout = cr.create_layout()
font = pango.FontDescription(self.font)
font.set_size(int(font_size)*pango.SCALE)
layout.set_font_description(font)
heights = {}
for idx in range(self.feedrange[self.feedseq]):
layout.set_text(self.feedcontent[self.feedseq].entries[idx].title)
width, heights[idx] = layout.get_pixel_size()
idx_of_max_height = max((v,k) for (k,v) in heights.iteritems())[1]
height = max((v,k) for (k,v) in heights.iteritems())[0]
while height > (self.h * 0.64):
font_size -= 1
font.set_size(int(font_size)*pango.SCALE)
layout.set_font_description(font)
layout.set_text(self.feedcontent[self.feedseq].entries[idx].title)
width, height = layout.get_pixel_size()
# print height, idx_of_max_height, font_size
self.font_size = font_size
for idx in range(self.feedrange[self.feedseq]):
if not self.feeds[self.feedseq]['pango']:
#temp
cr.set_font_size(self.font_size + 1)
if idx in self.width:
width = self.width[idx]
elif idx not in self.width:
(x, y, width, height, dx, dy) = cr.text_extents(self.feedcontent[self.feedseq].entries[idx].title)
self.width[idx] = width
self.height_diff[idx] = -y - (height/2)
self.height_diff_min = min((value, key) for (key, value) in self.height_diff.items())[0]
# print x, y, width, height, dx, dy, self.height_diff[idx], self.height_diff_min
elif self.feeds[self.feedseq]['pango']:
if idx in self.layout:
layout = self.layout[idx]
elif idx not in self.layout:
layout = cr.create_layout()
font = pango.FontDescription(self.font)
font.set_size(int(self.font_size + 1)*pango.SCALE)
layout.set_font_description(font)
layout.set_text(self.feedcontent[self.feedseq].entries[idx].title)
self.width[idx], self.height[idx] = layout.get_pixel_size()
self.layout[idx] = layout
# print self.height[idx]
if not idx == 0:
if not self.feeds[self.feedseq]['RtL']:
self.location[idx] = ((self.location[idx-1] + self.width[idx-1]) + 100 + self.inner_logo_width)
elif self.feeds[self.feedseq]['RtL']:
self.location[idx] = ((self.location[idx-1] - self.width[idx]) - 100 - self.inner_logo_width)
# print self.location[idx], self.location[idx - 1], self.width[idx - 1]
elif idx == 0:
if not self.feeds[self.feedseq]['RtL']:
self.location[idx] = self.xLocation
elif self.feeds[self.feedseq]['RtL']:
self.location[idx] = self.xLocation - self.width[idx]
self.startpoint = (self.window.allocation.width + 200)
self.stoppoint = (-self.width[idx] -100)
if self.location[idx] <= self.startpoint and self.location[idx] >= self.stoppoint:
if not self.feeds[self.feedseq]['pango']:
cr.move_to(self.location[idx], self.h/2 + self.height_diff_min)
cr.show_text(self.feedcontent[self.feedseq].entries[idx].title)
elif self.feeds[self.feedseq]['pango']:
cr.move_to(self.location[idx], self.h/2 - self.height[idx]/2)
cr.show_layout(self.layout[idx])
cr.save()
# drawing image (comsumes a bit more cpu)
try:
inner_surface = self.inner_surface
inner_logo_height = self.inner_logo_height
inner_logo_width = self.inner_logo_width
inner_logo_opt_h = self.inner_logo_opt_h
except AttributeError:
inner_surface = cairo.ImageSurface.create_from_png(DIR_IMG + self.logo_name)
inner_logo_height = float(inner_surface.get_height())
inner_logo_width = float(inner_surface.get_width())
new_height = self.h * 0.8
inner_logo_opt_h = (self.h/2 - (inner_logo_height)/2)
self.inner_surface = inner_surface
self.inner_logo_height = inner_logo_height
self.inner_logo_width = inner_logo_width
self.inner_logo_opt_h = inner_logo_opt_h
# print self.h, inner_logo_opt_h
if not idx == (self.feedrange[self.feedseq] - 1):
if not self.feeds[self.feedseq]['RtL']:
self.inner_logo_location[idx] = self.location[idx] + self.width[idx] + 50
elif self.feeds[self.feedseq]['RtL']:
self.inner_logo_location[idx] = self.location[idx] - (inner_logo_width) - 50
if self.inner_logo_location[idx] <= self.startpoint and self.inner_logo_location[idx] >= self.stoppoint:
cr.rectangle (self.inner_logo_location[idx], inner_logo_opt_h, inner_logo_width, inner_logo_height)
cr.set_source_surface(inner_surface, self.inner_logo_location[idx], inner_logo_opt_h)
cr.fill()
cr.restore()
except KeyError as keyErr:
# print "Loading, current sequence", self.feedseq, keyErr
if not self.feeds[self.feedseq]['RtL']:
self.xLocation = self.w + 100
elif self.feeds[self.feedseq]['RtL']:
self.xLocation = 0
def RSSdownload(self, feedlink, seq):
    """Start an asynchronous download of feed number `seq` from `feedlink`.

    Uses twisted's getPage; RSSmanage is chained as the success callback and
    errorHandler as the errback (both receive this object and `seq`).
    NOTE(review): the `for n in range(3)` loop breaks unconditionally on the
    first iteration, so it never actually retries here -- retries happen via
    errorHandler's timeout instead.
    """
    # Downloading the feed page using twisted
    for n in range(3):
        # try:
        self.downloadedfeed = getPage(feedlink)
        self.downloadedfeed.addCallback(RSSmanage, self, seq)
        #test
        self.downloadedfeed.addErrback(errorHandler, self, seq)
        print "Starting downloading for feed {0}:{1}".format(seq, self.feeds[seq]['feed_title'])
        break
        # except:
        #     print "Problem downloading feed {0}:{1}".format(seq, self.feeds[seq][1])
def RSSmanage(output, self, seq):
    """Twisted success callback: cache the downloaded feed body.

    `output` is the raw page body from getPage.  The body is only parsed
    immediately when it is the very first feed and the widget is expanded;
    other feeds are parsed lazily from self.out later.
    """
    self.out[seq] = output
    # print output
    print "Done downloading for feed {0}:{1}".format(seq, self.feeds[seq]['feed_title'])
    #1st problem
    if seq == 0 and self.feedseq == 0 and not self.collapsed:
        parse(self, output, seq)
    # Signal the UI that this download round has finished.
    self.task_done = True
def parse(self, string, seq):
    """Parse the downloaded feed body for feed `seq` with feedparser.

    Side effects: fills self.feedcontent[seq], self.feedrange[seq] (number
    of entries to display, capped at the feed's 'feedmax' setting),
    self.logo[seq] and self.logo_name, and triggers the logo download.
    """
    # Parsing the downloaded feed
    print "Parsing feed {0}:{1}".format(seq, self.feeds[seq]['feed_title'])
    self.feedcontent[seq] = feedparser.parse(string)
    # Determine the number of entries to show: all of them, or at most
    # the configured 'feedmax'.
    if self.feeds[self.feedseq]['feedmax'] >= len(self.feedcontent[seq]['entries']):
        self.feedrange[seq] = len(self.feedcontent[seq]['entries'])
    elif self.feeds[self.feedseq]['feedmax'] < len(self.feedcontent[seq]['entries']):
        self.feedrange[seq] = self.feeds[self.feedseq]['feedmax']
    # The logo link: Atom feeds expose feed.logo, RSS feeds feed.image.href;
    # fall back to a bundled default icon when neither attribute exists.
    self.logo_name = False
    try:
        if self.feedcontent[seq].feed.links[0].type == 'application/atom+xml':
            self.logo[seq] = self.feedcontent[seq].feed.logo
        else:
            self.logo[seq] = self.feedcontent[seq].feed.image.href
    # dont forget the error
    except AttributeError:
        self.logo[seq] = 'http://taljurf.fedorapeople.org/Scratch/FeedLine/icon.png'
    # The logo is cached locally as PNG regardless of its original extension.
    logo_name_old = os.path.basename(self.logo[seq])
    basename, extension = os.path.splitext(logo_name_old)
    self.logo_name = basename + '.png'
    # print self.logo_name
    getlogo(str(self.logo[seq]), self.h)
    # print self.logo[seq]
    print "Parsed feed {0}:{1}".format(seq, self.feeds[seq]['feed_title'])
    #TESSTT vvvvvvvvv
    # len(self.feedcontent[seq]['entries'])
    # print len(self.feedcontent[seq]['entries'])
def errorHandler(failure, self, seq):
    """Twisted errback for a failed feed download.

    Schedules a single retry of feed `seq` in 20 seconds via glib.
    `failure` (the twisted Failure) is intentionally ignored.
    """
    print "Connection Problem, retrying in 20 seconds"
    glib.timeout_add_seconds(20, get_again, self, seq)
def get_again(self, seq):
    """glib timeout callback: re-dispatch the download of feed `seq`.

    Looks up the downloader for the feed's type in self.optionsavail and
    calls it with the feed link.  Implicitly returns None (falsy), so the
    glib timeout fires only once -- presumably intentional; confirm.
    """
    feed_type = self.feeds[seq]['feed_type']
    self.optionsavail[feed_type][1](self, self.feeds[seq]['feed_link'], seq)
| Python |
#!/usr/bin/env python
"""setuptools install script for feedbar, a compact feed ticker."""
import sys, os, glob
from pkg_resources import require, DistributionNotFound, VersionConflict
from setuptools import setup

# NOTE: an earlier revision probed for a previously installed 'feedbar'
# distribution here (require/DistributionNotFound/VersionConflict) and
# aborted on conflict; that check is currently disabled.

# Should be built with prefix /usr, or add /usr here.
# All package metadata gathered in one mapping so the setup() call below
# stays trivial.
_SETUP_ARGS = dict(
    name='feedbar',
    version='0.01',
    description='A compact feed ticker',
    author='Tareq Aljurf',
    author_email='taljurf@fedoraproject.org',
    url='http://www.feedbar.org',
    packages=["feedbar"],
    package_dir={"feedbar": "src/"},
    scripts=['src/feedbar'],
    classifiers=[
        'Development Status :: Early Development',
        'Environment :: X11 Applications',
        'Intended Audience :: End Users/Desktop',
        #license correct
        'License :: GNU General Public License Version 3 (GPLv3)',
        'Operating System :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        # 'Topic :: Multimedia :: Sound :: Players',
    ],
    data_files=[
        ('/usr/share/applications', ['feedbar.desktop']),
        ('/usr/share/pixmaps', glob.glob('data/pixmaps/*')),
        ('/usr/share/feedbar/images', glob.glob('data/images/*')),
        # ('share/locale/pl/LC_MESSAGES', ['mo/pl/mpdBrowser.mo']),
        # ('share/locale/fr/LC_MESSAGES', ['mo/fr/mpdBrowser.mo']),
        # ('share/locale/it/LC_MESSAGES', ['mo/it/mpdBrowser.mo'])
    ],
)

setup(**_SETUP_ARGS)
| Python |
import feeds
def simple_format_link(entry):
    """Render a feed entry as an HTML anchor.

    The link and title come from an external feed, i.e. untrusted input, so
    both are escaped before being interpolated into markup: quoteattr()
    quotes and attribute-escapes the href value, escape() handles &, < and >
    in the link text.  (The old version interpolated both verbatim, an XSS
    vector.)

    @param entry: object with `link` and `title` attributes
    @return: an HTML <a> element as a string
    """
    from xml.sax.saxutils import escape, quoteattr
    return "<a href=%s>%s</a>" % (quoteattr(entry.link), escape(entry.title))
def get_feed_html(feed_url, n = 5, pattern = None, list_css_class = "feed-default", link_formatter = simple_format_link):
    """Build an HTML <ul> listing the most recent entries of a stored feed.

    @param feed_url: URL of the feed as stored in the feeds database
    @param n: maximum number of entries to include
    @param pattern: optional SQL LIKE pattern filtering entry titles
    @param list_css_class: extra CSS class for the <ul> element
    @param link_formatter: callable rendering one entry as HTML
    @return: the HTML fragment as a string
    """
    f = feeds.Feeds()
    try:
        # BUG FIX: get_recent() expects the feed URL and resolves the id
        # itself; the old code passed a numeric feed id, which get_recent
        # then treated as a URL and registered as a brand-new feed row.
        entries = f.get_recent(feed_url, n, pattern)
        html = '<ul class="feed-entries %s">\n' % list_css_class
        for e in entries:
            html = html + (" <li>%s</li>\n" % link_formatter(e))
    finally:
        # Always release the database connection, even on error.
        f.close()
    html = html + "</ul>"
    return html
# Command-line entry point: print the rendered HTML list for the feed URL
# given as the first argument.
if __name__ == '__main__':
    import sys
    print get_feed_html(sys.argv[1])
| Python |
import sqlite3
import sys
import feedparser
from datetime import datetime
# Filename of the SQLite database holding the feeds and entries tables.
DATABASE = 'feeds.sqlite'
class Entry(object):
    """Empty attribute bag; one attribute is set per database column."""
class Feeds(object):
    """SQLite-backed store of feeds and their entries.

    Wraps a single sqlite3 connection to DATABASE; rows come back as
    sqlite3.Row objects so columns can be addressed by name.
    """

    def __init__(self):
        self.conn = sqlite3.connect(DATABASE)
        self.conn.row_factory = sqlite3.Row
        self._create_tables()

    def _create_tables(self):
        """Create the feeds/entries tables if they don't already exist."""
        # BUG FIX: the explanatory string used to sit *after* the first
        # statement, where it was a no-op expression, not a docstring.
        c = self.conn.cursor()
        try:
            c.execute('CREATE TABLE IF NOT EXISTS feeds (feed_id INTEGER PRIMARY KEY AUTOINCREMENT, url VARCHAR(1000));')
            c.execute('CREATE TABLE IF NOT EXISTS entries (entry_id INTEGER PRIMARY KEY AUTOINCREMENT, feed_id INTEGER, id INTEGER, link VARCHAR(1000), title VARCHAR(1000), summary TEXT, published DATETIME, updated DATETIME);')
        finally:
            # Close the cursor even when table creation fails.
            c.close()

    def _process_entries(self, feed_id, entries):
        """Store any entries of `feed_id` that are not in the DB yet.

        @param feed_id: id of the feed the entries belong to
        @param entries: iterable of feedparser entry objects
        @return: a (new, seen) tuple of counts
        """
        c = self.conn.cursor()
        new = 0
        seen = 0
        try:
            for entry in entries:
                # Existence check: fetch at most one row instead of
                # materializing the whole result set just to measure it.
                c.execute('SELECT 1 FROM entries WHERE feed_id = ? AND id = ?', (feed_id, entry.id))
                if c.fetchone() is None:
                    new = new + 1
                    c.execute('INSERT INTO entries (feed_id, id, link, title, summary, published, updated) VALUES (?, ?,?,?,?,?,?)', (feed_id, entry.id, entry.link, entry.title, entry.summary, datetime(*entry.published_parsed[:6]), datetime(*entry.updated_parsed[:6])))
                else:
                    seen = seen + 1
        finally:
            c.close()
        self._commit()
        return (new, seen)

    def _commit(self):
        """Commit the current transaction."""
        self.conn.commit()

    def _get_feed_id(self, feed_url, add_if_new = True):
        """Map a feed URL to its feed_id, optionally registering new URLs.

        @raise Exception: when the URL is unknown and add_if_new is False
        """
        c = self.conn.cursor()
        try:
            c.execute('SELECT feed_id FROM feeds WHERE url = ?', (feed_url,))
            row = c.fetchone()
            if row:
                feed_id = row[0]
            elif add_if_new:
                c.execute('INSERT INTO feeds (url) VALUES (?)', (feed_url,))
                feed_id = c.lastrowid
                self._commit()
            else:
                raise Exception("No feed data for %s" % feed_url)
        finally:
            c.close()
        return feed_id

    def _make_entry(self, cursor, row):
        """Convert a DB row into an Entry with one attribute per column."""
        e = Entry()
        for idx, col in enumerate(cursor.description):
            setattr(e, col[0], row[idx])
        return e

    def update(self, feed_url):
        """Download the feed and store new entries.

        @return: a (new, seen) tuple of counts
        """
        entries = feedparser.parse(feed_url).entries
        feed_id = self._get_feed_id(feed_url)
        counts = self._process_entries(feed_id, entries)
        return counts

    def get_recent(self, feed_url, n = 5, pattern = None):
        """Return up to `n` most recent entries of the feed at `feed_url`,
        optionally filtered by a SQL LIKE `pattern` on the title."""
        feed_id = self._get_feed_id(feed_url)
        c = self.conn.cursor()
        try:
            if pattern:
                c.execute('SELECT * FROM entries WHERE feed_id = ? AND title LIKE ? ' + \
                          'ORDER BY published DESC LIMIT ?', (feed_id, pattern, n))
            else:
                c.execute('SELECT * FROM entries WHERE feed_id = ? ORDER BY published DESC LIMIT ?', (feed_id, n))
            recent = [self._make_entry(c, row) for row in c.fetchall()]
        finally:
            c.close()
        return recent

    def close(self):
        """Close the underlying database connection."""
        self.conn.close()
| Python |
import feeds
f = feeds.Feeds()
print [e.link for e in f.get_recent('stackoverflow.xml')]
f.close()
| Python |
#!/usr/bin/python2
# Launcher for the feedcircuit web front end: embeds a Common Lisp program
# (an s-http-server instance exposing /sync, /list, /grab and a static
# resource handler on port 8889) and pipes it into a fresh SBCL process.
import subprocess
import signal
# NOTE(review): `signal` is imported but never used here -- presumably kept
# for a planned signal handler; confirm before removing.

# The Lisp program, held verbatim in a raw string.  The opening r"""" makes
# the first character of the string a stray double quote; it is deliberately
# skipped below by writing code[1:] to the interpreter.
code = r""""
(load "~/quicklisp/setup.lisp")
(ql:quickload 'feedcircuit)
(ql:quickload 's-http-server)
(ql:quickload 's-utils)
(ql:quickload 'puri)
(rename-package :s-http-server :sfs)
(defun write-text-response (request stream &optional text)
(unless text (setf (sfs:get-keep-alive request) nil))
(sfs:write-http-response-status-line stream)
(sfs:write-http-response-headers
(sfs:standard-http-response-headers
request :content-type "text/plain;charset=utf-8"
:content-length (when text (file-string-length stream text))) stream)
(sfs:write-http-response-line "" stream)
(when text (write-string text stream))
t)
(defun list-handler (server handler request stream)
(declare (ignore server))
(let ((body (format nil "~{~a~^~%~}"
(mapcar #'file-namestring
(feedcircuit:list-ebooks (second handler))))))
(write-text-response request stream body)))
(defun sync-handler (server handler request stream)
(declare (ignore server))
(write-text-response request stream)
(let ((config (with-open-file (f (second handler)) (read f)))
(b-out (make-broadcast-stream stream *standard-output*))
(b-err (make-broadcast-stream stream *error-output*)))
(let ((*standard-output* b-out)
(*error-output* b-err))
(apply #'feedcircuit:ebook-from-feeds config))))
(defun parse-query (query)
(flet ((split (str sep) (s-utils:tokens str :separators (list sep)))
(unescape (str) (puri::decode-escaped-encoding str t nil)))
(mapcar #'(lambda (param) (mapcar #'unescape (split param #\=)))
(split query #\&))))
(defun get-urls (query)
(let ((params (parse-query query)))
(mapcar #'second
(remove-if-not #'(lambda (param) (equalp (first param) "url")) params))))
(defun grab-handler (server handler request stream)
(declare (ignore server))
(let ((urls (get-urls (puri:uri-query (sfs:get-uri request))))
(b-out (make-broadcast-stream stream *standard-output*))
(b-err (make-broadcast-stream stream *error-output*)))
(let ((*standard-output* b-out)
(*error-output* b-err))
(write-text-response request stream)
(write-string (feedcircuit:ebook-from-urls urls (second handler)) stream))))
(let* ((server (sfs:make-s-http-server :port 8889))
(config-file "config")
(config (with-open-file (f config-file) (read f)))
(work-dir (namestring (merge-pathnames (or (getf config :work-dir) "")))))
(sfs:register-context-handler server "/sync" #'sync-handler
:arguments (list config-file))
(sfs:register-context-handler server "/list" #'list-handler
:arguments (list work-dir))
(sfs:register-context-handler server "/grab" #'grab-handler
:arguments (list work-dir))
(sfs:register-context-handler server "/" #'sfs:static-resource-handler
:arguments (list work-dir) :at-end-p t)
(sfs:start-server server)
(handler-case (read) (serious-condition ())))
"""
# SBCL invocation: quiet, non-interactive, no init files, program read from
# stdin.
lisp = ["/usr/bin/sbcl", "--noinform", "--disable-ldb", "--lose-on-corruption",
"--end-runtime-options", "--no-userinit", "--no-sysinit",
"--disable-debugger", "--noprint", "--end-toplevel-options"]
proc = subprocess.Popen(lisp, stdin=subprocess.PIPE)
# Feed the program (minus the stray leading quote) to SBCL.  stdin stays
# open, so the trailing (read) in the Lisp code blocks -- keeping the server
# alive -- until this launcher exits.
proc.stdin.write(code[1:])
proc.stdin.flush()
try:
    proc.wait()
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt --
    # presumably intentional so Ctrl-C falls through to the close() below;
    # confirm before narrowing.
    pass
proc.stdin.close()
| Python |
#!/usr/bin/python2
# Launcher for the feedcircuit web front end (duplicate of the previous
# script): embeds a Common Lisp program (an s-http-server instance exposing
# /sync, /list, /grab and a static resource handler on port 8889) and pipes
# it into a fresh SBCL process.
import subprocess
import signal
# NOTE(review): `signal` is imported but never used here -- presumably kept
# for a planned signal handler; confirm before removing.

# The Lisp program, held verbatim in a raw string.  The opening r"""" makes
# the first character of the string a stray double quote; it is deliberately
# skipped below by writing code[1:] to the interpreter.
code = r""""
(load "~/quicklisp/setup.lisp")
(ql:quickload 'feedcircuit)
(ql:quickload 's-http-server)
(ql:quickload 's-utils)
(ql:quickload 'puri)
(rename-package :s-http-server :sfs)
(defun write-text-response (request stream &optional text)
(unless text (setf (sfs:get-keep-alive request) nil))
(sfs:write-http-response-status-line stream)
(sfs:write-http-response-headers
(sfs:standard-http-response-headers
request :content-type "text/plain;charset=utf-8"
:content-length (when text (file-string-length stream text))) stream)
(sfs:write-http-response-line "" stream)
(when text (write-string text stream))
t)
(defun list-handler (server handler request stream)
(declare (ignore server))
(let ((body (format nil "~{~a~^~%~}"
(mapcar #'file-namestring
(feedcircuit:list-ebooks (second handler))))))
(write-text-response request stream body)))
(defun sync-handler (server handler request stream)
(declare (ignore server))
(write-text-response request stream)
(let ((config (with-open-file (f (second handler)) (read f)))
(b-out (make-broadcast-stream stream *standard-output*))
(b-err (make-broadcast-stream stream *error-output*)))
(let ((*standard-output* b-out)
(*error-output* b-err))
(apply #'feedcircuit:ebook-from-feeds config))))
(defun parse-query (query)
(flet ((split (str sep) (s-utils:tokens str :separators (list sep)))
(unescape (str) (puri::decode-escaped-encoding str t nil)))
(mapcar #'(lambda (param) (mapcar #'unescape (split param #\=)))
(split query #\&))))
(defun get-urls (query)
(let ((params (parse-query query)))
(mapcar #'second
(remove-if-not #'(lambda (param) (equalp (first param) "url")) params))))
(defun grab-handler (server handler request stream)
(declare (ignore server))
(let ((urls (get-urls (puri:uri-query (sfs:get-uri request))))
(b-out (make-broadcast-stream stream *standard-output*))
(b-err (make-broadcast-stream stream *error-output*)))
(let ((*standard-output* b-out)
(*error-output* b-err))
(write-text-response request stream)
(write-string (feedcircuit:ebook-from-urls urls (second handler)) stream))))
(let* ((server (sfs:make-s-http-server :port 8889))
(config-file "config")
(config (with-open-file (f config-file) (read f)))
(work-dir (namestring (merge-pathnames (or (getf config :work-dir) "")))))
(sfs:register-context-handler server "/sync" #'sync-handler
:arguments (list config-file))
(sfs:register-context-handler server "/list" #'list-handler
:arguments (list work-dir))
(sfs:register-context-handler server "/grab" #'grab-handler
:arguments (list work-dir))
(sfs:register-context-handler server "/" #'sfs:static-resource-handler
:arguments (list work-dir) :at-end-p t)
(sfs:start-server server)
(handler-case (read) (serious-condition ())))
"""
# SBCL invocation: quiet, non-interactive, no init files, program read from
# stdin.
lisp = ["/usr/bin/sbcl", "--noinform", "--disable-ldb", "--lose-on-corruption",
"--end-runtime-options", "--no-userinit", "--no-sysinit",
"--disable-debugger", "--noprint", "--end-toplevel-options"]
proc = subprocess.Popen(lisp, stdin=subprocess.PIPE)
# Feed the program (minus the stray leading quote) to SBCL.  stdin stays
# open, so the trailing (read) in the Lisp code blocks -- keeping the server
# alive -- until this launcher exits.
proc.stdin.write(code[1:])
proc.stdin.flush()
try:
    proc.wait()
except:
    # NOTE(review): bare except also swallows KeyboardInterrupt --
    # presumably intentional so Ctrl-C falls through to the close() below;
    # confirm before narrowing.
    pass
proc.stdin.close()
| Python |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
# Matches one line of `hg annotate -c` output: a 12-character short hash,
# a colon, then the annotated source line.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a list of revisions that touched the lines corresponding to that string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify
    @raise ValueError: if hg annotate produces an unparseable line
    '''
    # Take output of hg annotate to get revision of each line
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Create a map of line -> revision (key is list index, line 0 doesn't exist)
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            # BUG FIX: raising a string literal is a TypeError in modern
            # Python; raise a real exception instead.
            raise ValueError('Unexpected line of output from hg: %s' % line)
        line_revs.append(rev_match.group('hash'))
    # `string_info` (renamed from the builtin-shadowing `str`) is the
    # per-string dict produced by the parser.
    for string_info in parsed_file.itervalues():
        # Get the lines that correspond to each string
        start_line = string_info['startLine']
        end_line = string_info['endLine']
        # Gather the revisions that touched those lines
        revs = [line_revs[line_number]
                for line_number in range(start_line, end_line + 1)]
        # Merge with any revisions that were already there
        # (for explicit revision specification)
        if 'revs' in string_info:
            revs += string_info['revs']
        # Assign the revisions to the string
        string_info['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether a revision supersedes another, i.e. whether the older
    revision is an ancestor of the newer one.  Also returns True when the
    two revisions are identical.

    @param filename: the file whose history is consulted
    @param rev1: the revision that may be superseding the other
    @param rev2: the revision that may be superseded
    @return: True if rev1 supersedes rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # Ask hg for every ancestor of rev1 (short hashes, one per line) and
    # check whether rev2 is among them.
    log_args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1,
                '--template', '{node|short}\n', filename]
    ancestor_hashes = _GetOutputLines(log_args)
    return rev2 in ancestor_hashes
def NewestRevision(filename, rev1, rev2):
    '''
    Returns whichever of two revisions is closest to the repository head.
    If neither is an ancestor of the other, either one may be returned.

    @param rev1: the first revision
    @param rev2: the second revision
    '''
    return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    '''Print the command-line usage message and exit with status 1.'''
    usage_lines = [
        'Usage: %s <command> [<language> ...]\n' % sys.argv[0],
        'Commands are:',
        ' cleanup',
        ' translate',
        ' validate',
    ]
    for usage_line in usage_lines:
        print(usage_line)
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or outdated strings
    from the files for the given languages.

    @param languages: the languages to translate
    '''
    # Run a validation pass first to find out what needs translating.
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        untranslated = missing[lang] + outdated[lang]
        if not untranslated:
            continue
        # Hand everything that needs work to an interactive translator.
        translator = mytracks.translate.Translator(lang)
        translator.Translate(untranslated)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.

    @param languages: the languages to compute for
    @return: the total number of errors found
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    error_count = 0
    if validator.valid():
        print('All files OK')
        return error_count
    # Report each category of problem and tally the totals.
    for lang, missing in validator.missing_in_master().iteritems():
        print('Missing in master, present in %s: %s:' % (lang, str(missing)))
        error_count += len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
        print('Missing in %s, present in master: %s:' % (lang, str(missing)))
        error_count += len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
        print('Outdated in %s: %s:' % (lang, str(outdated)))
        error_count += len(outdated)
    return error_count
# Command-line entry point: dispatch to the requested subcommand, optionally
# restricted to an explicit set of languages.
if __name__ == '__main__':
    argv = sys.argv
    argc = len(argv)
    if argc < 2:
        Usage()
    languages = mytracks.files.GetAllLanguageFiles()
    # BUG FIX: accept any number of language arguments (the usage line says
    # [<language> ...]); the old `argc == 3` check silently ignored the
    # filter when more than one language was given.
    if argc >= 3:
        langs = set(argv[2:])
        if not langs.issubset(languages):
            # BUG FIX: raising a string literal is a TypeError in modern
            # Python; raise a real exception instead.
            raise ValueError('Language(s) not found')
        # Filter just to the languages specified (always keep 'en', the master)
        languages = dict((lang, lang_file)
                         for lang, lang_file in languages.iteritems()
                         if lang in langs or lang == 'en')
    cmd = argv[1]
    # BUG FIX: initialize error_count up front -- the 'translate' branch
    # previously left it undefined, crashing the summary print below.
    error_count = 0
    if cmd == 'translate':
        Translate(languages)
    elif cmd == 'validate':
        error_count = Validate(languages)
    else:
        Usage()
    print('%d errors found.' % error_count)
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
    '''
    Interactively prompts the user for translations and saves them.

    TODO: implement -- Translate() currently only echoes the string names.
    '''

    def __init__(self, language):
        '''
        @param language: the language code this translator works on
        '''
        self._language = language

    def Translate(self, string_names):
        # Placeholder: just show which strings need translation.
        print(string_names)
'''
Module which compares language files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    """Compares each language's strings.xml against the English master and
    records missing and outdated string keys."""

    def __init__(self, languages):
        '''
        Builds a strings file validator.

        Params:
        @param languages: a dictionary mapping each language to its corresponding directory
        '''
        self._langs = {}
        self._master = None
        self._language_paths = languages
        parser = StringsParser()
        # Parse every strings.xml and annotate each string with the hg
        # revisions that touched its lines.
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            if lang == 'en':
                # 'en' is the master file all others are compared against.
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()

    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is valid.
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()

    def valid(self):
        # True only when Validate() recorded no problems at all.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)

    def missing_in_master(self):
        # Map of language -> keys present in that language but not in master.
        return self._missing_in_master

    def missing_in_lang(self):
        # Map of language -> keys present in master but not in that language.
        return self._missing_in_lang

    def outdated_in_lang(self):
        # Map of language -> keys whose master copy changed more recently.
        return self._outdated_in_lang

    def _Reset(self):
        # These are maps from language to string name list
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}

    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            # Only record languages that actually have problems.
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang

    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation
        to the master keys.
        '''
        for lang, file in self._langs.iteritems():
            outdated = []
            # NOTE: `str` here shadows the builtin; it is the per-string
            # info dict produced by StringsParser.
            for key, str in file.iteritems():
                # Get all revisions that touched master and language files for this
                # string.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = str['revs']
                if not master_revs or not lang_revs:
                    print 'WARNING: No revision for %s in %s' % (key, lang)
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check that),
                # and as such there is always one revision which supersedes all others.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version.
                # NOTE(review): DoesRevisionSuperceed also returns True when
                # both revisions are equal, so a string last touched by the
                # same commit in both files is flagged outdated -- confirm
                # whether that is intended.
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Path of the Android resources dir relative to the project root.
MYTRACKS_RES_DIR = 'MyTracks/res'
# Name of the master (English) values directory.
ANDROID_MASTER_VALUES = 'values'
# Glob mask matching per-language values directories (values-fr, ...).
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks upwards from the current working directory until a directory
    containing MYTRACKS_RES_DIR is found.

    @raise RuntimeError: if no enclosing My Tracks project is found
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        parent = os.path.split(path)[0]
        # Stop once split() no longer changes the path, i.e. at the
        # filesystem root.  BUG FIX: the old code compared against the
        # literal '/' (an infinite loop on non-POSIX roots) and raised a
        # string literal, which is a TypeError in modern Python.
        if parent == path:
            raise RuntimeError('Not in My Tracks project')
        path = parent
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    The master 'values' directory is registered under the key 'en'.

    @raise RuntimeError: if no language dirs or no master dir are found
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    # BUG FIX: raising string literals is a TypeError in modern Python;
    # raise real exceptions instead.
    if not language_dirs:
        raise RuntimeError('No languages found!')
    if not os.path.isdir(master_dir):
        raise RuntimeError("Couldn't find master file")
    # Extract the language code from each 'values-<code>' directory name
    # (`lang_dir` renamed from `dir`, which shadowed the builtin).
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                       for lang_dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.

    This object is not thread-safe and should be used for parsing a single file at
    a time, only.
    '''

    def Parse(self, file):
        '''
        Parses the given file and returns a dictionary mapping keys to an object
        with attributes for that key, such as the value, start/end line and explicit
        revisions.

        In addition to the standard XML format of the strings file, this parser
        supports an annotation inside comments, in one of these formats:
        <!-- KEEP_PARENT name="bla" -->
        <!-- KEEP_PARENT name="bla" rev="123456789012" -->
        Such an annotation indicates that we're explicitly inheriting form the
        master file (and the optional revision says that this decision is compatible
        with the master file up to that revision).

        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers
        self._xml_parser = ParserCreate()
        self._xml_parser.StartElementHandler = self._StartElementHandler
        self._xml_parser.EndElementHandler = self._EndElementHandler
        self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
        self._xml_parser.CommentHandler = self._CommentHandler
        # BUG FIX: open in binary mode (expat's ParseFile reads raw bytes and
        # rejects text-mode streams on Python 3) and close the handle even if
        # parsing raises.
        file_obj = open(file, 'rb')
        try:
            self._xml_parser.ParseFile(file_obj)
        finally:
            file_obj.close()
        return self._all_strings

    def _Reset(self):
        # State for the <string> element currently being parsed, if any.
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
        # Accumulated result: string name -> info dict.
        self._all_strings = {}

    def _StartElementHandler(self, name, attrs):
        # Only named <string> elements are of interest.
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        assert not self._currentString
        assert not self._currentStringName
        self._currentString = {
            'startLine' : self._xml_parser.CurrentLineNumber,
        }
        # An explicit rev attribute seeds the revision list.
        if 'rev' in attrs:
            self._currentString['revs'] = [attrs['rev']]
        self._currentStringName = attrs['name']
        self._currentStringValue = ''

    def _EndElementHandler(self, name):
        if name != 'string':
            return
        assert self._currentString
        assert self._currentStringName
        self._currentString['value'] = self._currentStringValue
        self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._currentStringName] = self._currentString
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None

    def _CharacterDataHandler(self, data):
        # Character data may arrive in several chunks; accumulate it.
        if not self._currentString:
            return
        self._currentStringValue += data

    # Recognizes the KEEP_PARENT annotation described in Parse()'s docstring.
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                    r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                    r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                    re.MULTILINE | re.DOTALL)

    def _CommentHandler(self, data):
        keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
        if not keep_parent_match:
            return
        name = keep_parent_match.group('name')
        # Comments are a single line, so start and end line coincide.
        self._all_strings[name] = {
            'keepParent' : True,
            'startLine' : self._xml_parser.CurrentLineNumber,
            'endLine' : self._xml_parser.CurrentLineNumber
        }
        rev = keep_parent_match.group('rev')
        if rev:
            self._all_strings[name]['revs'] = [rev]
#!/usr/bin/python
import sys
import os
import re
# Instruction classes: each list selects the encoding format used by the
# assembler's second pass.
typeRinstructions = ['add', 'sub', 'and', 'or', 'nor', 'slt', 'jr']
typeIinstructions = ['addi', 'ori', 'lui']
typeBRANCHinstructions = ['beq']
typeMEMinstructions = ['lw', 'sw']
typeJinstructions = ['j', 'jal']
typeSHIFTinstructions = ['sll', 'srl']
# Pseudo-instructions are expanded into real instructions in a pre-pass.
typePseudoInstructions = ['li']
# Opcode field (bits 31..26) for non-R-type instructions.
instruction2opcode = {
    'ori': 0xd,
    'lui': 0xf,
    'addi': 0x8,
    'beq': 0x4,
    'lw': 0x23,
    'sw': 0x2b,
    'j': 0x2,
    'jal': 0x3,
}
# Function field (bits 5..0) for R-type and shift instructions (opcode 0).
operation2funct = {
    'add': 0x20,
    'sub': 0x22,
    'and': 0x24,
    'or': 0x25,
    'nor': 0x27,
    'slt': 0x2a,
    'sll': 0x00,
    'srl': 0x02,
    'jr': 0x08,
}
# Register names (and bare register numbers) to register file indices.
regname2number = {
    '$zero': 0,
    '$at': 1,
    '$v0': 2,
    '$v1': 3,
    '$a0': 4,'$a1': 5,'$a2': 6,'$a3': 7,
    '$t0': 8,'$t1': 9,'$t2': 10,'$t3': 11,'$t4': 12,'$t5': 13,'$t6': 14,'$t7': 15,
    '$s0': 16,'$s1': 17,'$s2': 18,'$s3': 19,'$s4': 20,'$s5': 21,'$s6': 22,'$s7': 23,
    '$t8': 24,'$t9': 25,
    '$k0': 26,'$k1': 27,
    '$gp': 28,
    '$sp': 29,
    '$fp': 30,
    '$ra': 31,
    '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, '11': 11, '12': 12, '13': 13, '14': 14, '15': 15,
    '16': 16, '17': 17, '18': 18, '19': 19, '20': 20, '21': 21, '22': 22, '23': 23, '24': 24, '25': 25, '26': 26, '27': 27, '28': 28, '29': 29, '30': 30, '31': 31,
}
# Words that may not be used as labels.  NOTE(review): concatenating
# dict .keys() with lists is Python 2 only; Python 3 would need list(...).
reservedwords = typeRinstructions + typeIinstructions + typeJinstructions + typeMEMinstructions + instruction2opcode.keys() + operation2funct.keys() + regname2number.keys() + typeBRANCHinstructions + typeSHIFTinstructions
# Global label table: label name -> instruction index, filled by definelabels.
labels = dict()
def isinmm(word):
    """Return True when `word` looks like a numeric immediate operand.

    Used to distinguish immediates from labels in branch operands.
    BUG FIX: also accept negative immediates such as '-4'; previously a
    leading '-' made the operand be treated as a label, causing a KeyError
    during branch encoding.
    """
    digits = '0123456789'
    if word.startswith('-'):
        return len(word) > 1 and word[1] in digits
    return word[0] in digits
def isreg(word):
    """Return True when `word` names a register: a '$'-prefixed name or a
    bare register number 0-31."""
    if word.startswith('$'):
        return True
    return word in [str(regnum) for regnum in range(32)]
def isdirective(line):
    """Return True when the line is an assembler directive ('.'-prefixed).

    Indexes line[0] directly, so callers must pass a non-empty string
    (same contract as before).
    """
    first_char = line[0]
    return first_char == '.'
def getlinelabelandinstruction(line):
    """Split a source line into its (label, instruction) parts.

    The label (text before ':', lowercased) may be empty, as may the
    instruction (for label-only lines).
    """
    before, sep, after = line.partition(':')
    label = before.strip()
    instruction = after.strip()
    # No colon at all: the whole line is the instruction, not a label.
    if sep == '' and instruction == '':
        label, instruction = instruction, label
    return (label.lower(), instruction)
def definelabels(linesplittedcode):
    """First pass: record each label's instruction index in the global
    `labels` map.

    @param linesplittedcode: the source, already split into lines
    @raise ValueError: if a label is a reserved word or starts with a digit
    """
    global labels
    i = -1
    for line in linesplittedcode:
        # Skip blank lines
        if line.strip() == '':
            continue
        i = i + 1
        label, instruction = getlinelabelandinstruction(line)
        if label != '':
            # BUG FIX: validate with real exceptions; the original asserts
            # disappear when run under `python -O`.
            if label in reservedwords:
                raise ValueError('label %r is a reserved word' % label)
            if label[0] in '0123456789':
                raise ValueError('label %r may not start with a digit' % label)
            labels[label] = i
        # A label-only line does not consume an instruction slot.
        if instruction == '':
            i = i - 1
def goFirstForPseudoInstructions(linesplittedcode):
    """Pre-pass: expand pseudo-instructions in place.

    Currently only 'li' is handled; it is replaced by a 'lui $at, high' /
    'ori rd, $at, low' pair (the label, if any, moves to the lui line).
    NOTE(review): the list is mutated (pop/insert) while being iterated --
    it appears to work because the replacement lines are not themselves
    pseudo-instructions, but confirm before relying on it.
    """
    #global labels
    i = -1
    for line in linesplittedcode:
        i = i + 1
        # Skip blank lines
        if line.strip() == '':
            continue
        label, instruction = getlinelabelandinstruction(line)
        # Skip label-only lines
        if instruction == '':
            continue
        # NOTE(review): indexing `operands` requires map() to return a list,
        # i.e. Python 2 -- Python 3 would need list(map(...)).
        operation, operands = map(str.lower, re.split('\s+', instruction, 1))
        operands = map(str.strip, operands.split(','))
        if operation in typePseudoInstructions:
            if operation == 'li':
                # Split the 32-bit immediate into high and low 16-bit halves.
                imm = int(operands[1])
                high=imm >> 16
                low=imm & 0x0000FFFF
                line1 = "lui $at, "+str(high)
                if label != '':
                    line1 = label+":\t"+line1
                line2 = "ori "+operands[0]+", $at, "+str(low)
                #print linesplittedcode[i]
                # Replace the 'li' line with the two-instruction expansion.
                linesplittedcode.pop(i)
                linesplittedcode.insert(i,line1)
                linesplittedcode.insert(i+1,line2)
    #if instruction == '':
    #    i = i - 1
def mipsminiassembler(startaddr, code):
global labels
labels = dict()
lines = map(str.strip, code.split('\n'))
#print lines
# Pasada cero para pseudoINSTRUCCIONES
goFirstForPseudoInstructions(lines)
#print lines
#os._exit(1)
# Primera pasada para buscar las etiquetas
definelabels(lines)
# Segunda pasada para ensamblar
i = -1
for line in lines:
# Saltear lineas en blanco
if line.strip() == '':
continue
i = i + 1
label, instruction = getlinelabelandinstruction(line)
if instruction == '':
i = i - 1
continue
operation, operands = map(str.lower, re.split('\s+', instruction, 1))
operands = map(str.strip, operands.split(','))
if operation in typeRinstructions:
if len(operands) == 1:
rs = regname2number[operands[0]]
rt = regname2number["$zero"]
rd = regname2number["$zero"]
else:
rs = regname2number[operands[1]]
rt = regname2number[operands[2]]
rd = regname2number[operands[0]]
funct = operation2funct[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b000000{:05b}{:05b}{:05b}00000{:06b}; // {}".format(i, rs, rt, rd,funct, line)
elif operation in typeIinstructions:
#print operands
if len(operands) < 3:
rs = regname2number["$zero"]
rt = regname2number[operands[0]]
inmmediate = int(operands[1]) & 0xFFFF # No more than 16 bits
elif len(operands) == 3:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeBRANCHinstructions:
rs = regname2number[operands[0]]
rt = regname2number[operands[1]]
if not isinmm(operands[2]):
# Then is a label. Convert it to offset from PC + 4
operands[2] = str((labels[operands[2].lower()]*4+startaddr - (i*4 + 4)) >> 2)
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeMEMinstructions:
rt = regname2number[operands[0]]
offset,rs = map(lambda e: e.replace(')', ''), operands[1].split('('))
if not isinmm(offset):
offset = str(labels[offset.lower()]*4+startaddr)
rs = regname2number[rs]
inmmediate = int(offset) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeJinstructions:
opcode = instruction2opcode[operation]
if not isinmm(operands[0]):
operands[0] = str(labels[operands[0].lower()]*4+startaddr)
address = (int(operands[0]) >> 2) & 0x3FFFFFF
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:026b}; // {}".format(i, opcode, address, line)
elif operation in typeSHIFTinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
shiftamount = operands[2]
assert(isinmm(shiftamount))
shiftamount = int(shiftamount)
assert(shiftamount >= 0)
funct = operation2funct[operation]
for j in range(shiftamount):
if j == 0:
# La primera vez, shiftear el registro pedido
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rs, rt, funct, line)
else:
# El resto de las veces shiftear el registro "resultado" para que los shifts sean acumulativos
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rt, rt, funct, line)
i = i + 1
i = i - 1
else:
assert(False)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Uso: mipsas.py archivo.asm"
exit(1)
filename = sys.argv[1]
if not os.path.isfile(filename):
print "No se encuentra el archivo {}".format(filename)
exit(1)
f = open(filename, 'r')
filecontent = f.read()
f.close()
mipsminiassembler(0, filecontent)
| Python |
#!/usr/bin/python
import sys
import os
import re
# Mnemonics grouped by encoding type.
typeRinstructions = ['add', 'sub', 'and', 'or', 'nor', 'slt']
typeIinstructions = ['addi']
typeBRANCHinstructions = ['beq']
typeMEMinstructions = ['lw', 'sw']
typeJinstructions = ['j']
typeSHIFTinstructions = ['sll', 'srl']
# Opcode field (bits 31-26) for non-R-type instructions.
instruction2opcode = {
    'addi': 0x8,
    'beq': 0x4,
    'lw': 0x23,
    'sw': 0x2b,
    'j': 0x2,
}
# Funct field (bits 5-0) for R-type/shift instructions (opcode 0).
operation2funct = {
    'add': 0x20,
    'sub': 0x22,
    'and': 0x24,
    'or': 0x25,
    'nor': 0x27,
    'slt': 0x2a,
    'sll': 0x00,
    'srl': 0x02,
}
# Register name (or bare number as a string) -> register number 0-31.
regname2number = {
    '$zero': 0,
    '$at': 1,
    '$v0': 2,
    '$v1': 3,
    '$a0': 4,'$a1': 5,'$a2': 6,'$a3': 7,
    '$t0': 8,'$t1': 9,'$t2': 10,'$t3': 11,'$t4': 12,'$t5': 13,'$t6': 14,'$t7': 15,
    '$s0': 16,'$s1': 17,'$s2': 18,'$s3': 19,'$s4': 20,'$s5': 21,'$s6': 22,'$s7': 23,
    '$t8': 24,'$t9': 25,
    '$k0': 26,'$k1': 27,
    '$gp': 28,
    '$sp': 29,
    '$fp': 30,
    '$ra': 31,
    '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, '11': 11, '12': 12, '13': 13, '14': 14, '15': 15,
    '16': 16, '17': 17, '18': 18, '19': 19, '20': 20, '21': 21, '22': 22, '23': 23, '24': 24, '25': 25, '26': 26, '27': 27, '28': 28, '29': 29, '30': 30, '31': 31,
}
# Every mnemonic/register name that must not be used as a label.
# (The .keys() list concatenation is Python 2 only.)
reservedwords = typeRinstructions + typeIinstructions + typeJinstructions + typeMEMinstructions + instruction2opcode.keys() + operation2funct.keys() + regname2number.keys() + typeBRANCHinstructions + typeSHIFTinstructions
# Mapping label name -> instruction index, filled in by definelabels().
labels = dict()
def isinmm(word):
    """Return True when *word* starts with a decimal digit (an immediate)."""
    return word[0] in '0123456789'
def isreg(word):
    """Return True when *word* names a register: '$'-prefixed or a bare 0-31."""
    if word[0] == '$':
        return True
    return word in [str(number) for number in range(32)]
def isdirective(line):
    """Return True when *line* is an assembler directive (starts with '.')."""
    first_char = line[0]
    return first_char == '.'
def getlinelabelandinstruction(line):
    """Split *line* into (label, instruction).

    The label (text before ':') is lowercased; a line without a colon
    yields an empty label and the whole stripped line as the instruction.
    """
    label, sep, instruction = [part.strip() for part in line.partition(':')]
    if sep == '' and instruction == '':
        # No ':' found: partition() put the whole line into `label`.
        label, instruction = instruction, label
    return (label.lower(), instruction)
def definelabels(linesplittedcode):
    """First pass: record each label's instruction index in global `labels`."""
    global labels
    index = -1
    for raw_line in linesplittedcode:
        # Blank lines are ignored entirely.
        if raw_line.strip() == '':
            continue
        index += 1
        label, instruction = getlinelabelandinstruction(raw_line)
        if label != '':
            # Labels must not collide with mnemonics/registers, nor start
            # with a digit (that would look like an immediate).
            assert label not in reservedwords
            assert label[0] not in '0123456789'
            labels[label] = index
        # A label-only line does not occupy an instruction slot.
        if instruction == '':
            index -= 1
def mipsminiassembler(startaddr, code):
global labels
labels = dict()
lines = map(str.strip, code.split('\n'))
# Primera pasada para buscar las etiquetas
definelabels(lines)
# Segunda pasada para ensamblar
i = -1
for line in lines:
# Saltear lineas en blanco
if line.strip() == '':
continue
i = i + 1
label, instruction = getlinelabelandinstruction(line)
if instruction == '':
i = i - 1
continue
operation, operands = map(str.lower, re.split('\s+', instruction, 1))
operands = map(str.strip, operands.split(','))
if operation in typeRinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[2]]
rd = regname2number[operands[0]]
funct = operation2funct[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b000000{:05b}{:05b}{:05b}00000{:06b}; // {}".format(i, rs, rt, rd,funct, line)
elif operation in typeIinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeBRANCHinstructions:
rs = regname2number[operands[0]]
rt = regname2number[operands[1]]
if not isinmm(operands[2]):
# Then is a label. Convert it to offset from PC + 4
operands[2] = str((labels[operands[2].lower()]*4+startaddr - (i*4 + 4)) >> 2)
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeMEMinstructions:
rt = regname2number[operands[0]]
offset,rs = map(lambda e: e.replace(')', ''), operands[1].split('('))
if not isinmm(offset):
offset = str(labels[offset.lower()]*4+startaddr)
rs = regname2number[rs]
inmmediate = int(offset) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeJinstructions:
opcode = instruction2opcode[operation]
if not isinmm(operands[0]):
operands[0] = str(labels[operands[0].lower()]*4+startaddr)
address = (int(operands[0]) >> 2) & 0x3FFFFFF
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:026b}; // {}".format(i, opcode, address, line)
elif operation in typeSHIFTinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
shiftamount = operands[2]
assert(isinmm(shiftamount))
shiftamount = int(shiftamount)
assert(shiftamount >= 0)
funct = operation2funct[operation]
for j in range(shiftamount):
if j == 0:
# La primera vez, shiftear el registro pedido
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rs, rt, funct, line)
else:
# El resto de las veces shiftear el registro "resultado" para que los shifts sean acumulativos
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rt, rt, funct, line)
i = i + 1
i = i - 1
else:
assert(False)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Uso: mipsas.py archivo.asm"
exit(1)
filename = sys.argv[1]
if not os.path.isfile(filename):
print "No se encuentra el archivo {}".format(filename)
exit(1)
f = open(filename, 'r')
filecontent = f.read()
f.close()
mipsminiassembler(0, filecontent)
| Python |
#!/usr/bin/python
import sys
import os
import re
typeRinstructions = ['add', 'sub', 'and', 'or', 'nor', 'slt']
typeIinstructions = ['addi']
typeBRANCHinstructions = ['beq']
typeMEMinstructions = ['lw', 'sw']
typeJinstructions = ['j']
typeSHIFTinstructions = ['sll', 'srl']
instruction2opcode = {
'addi': 0x8,
'beq': 0x4,
'lw': 0x23,
'sw': 0x2b,
'j': 0x2,
}
operation2funct = {
'add': 0x20,
'sub': 0x22,
'and': 0x24,
'or': 0x25,
'nor': 0x27,
'slt': 0x2a,
'sll': 0x00,
'srl': 0x02,
}
regname2number = {
'$zero': 0,
'$at': 1,
'$v0': 2,
'$v1': 3,
'$a0': 4,'$a1': 5,'$a2': 6,'$a3': 7,
'$t0': 8,'$t1': 9,'$t2': 10,'$t3': 11,'$t4': 12,'$t5': 13,'$t6': 14,'$t7': 15,
'$s0': 16,'$s1': 17,'$s2': 18,'$s3': 19,'$s4': 20,'$s5': 21,'$s6': 22,'$s7': 23,
'$t8': 24,'$t9': 25,
'$k0': 26,'$k1': 27,
'$gp': 28,
'$sp': 29,
'$fp': 30,
'$ra': 31,
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, '11': 11, '12': 12, '13': 13, '14': 14, '15': 15,
'16': 16, '17': 17, '18': 18, '19': 19, '20': 20, '21': 21, '22': 22, '23': 23, '24': 24, '25': 25, '26': 26, '27': 27, '28': 28, '29': 29, '30': 30, '31': 31,
}
# Every mnemonic/register name that must not be used as a label.
# (The .keys() list concatenation is Python 2 only.)
reservedwords = typeRinstructions + typeIinstructions + typeJinstructions + typeMEMinstructions + instruction2opcode.keys() + operation2funct.keys() + regname2number.keys() + typeBRANCHinstructions + typeSHIFTinstructions
# Mapping label name -> instruction index, filled in by definelabels().
labels = dict()
def isinmm(word):
    """Return True when *word* starts with a decimal digit (an immediate)."""
    return word[0] in '0123456789'
def isreg(word):
    """Return True when *word* names a register: '$'-prefixed or a bare 0-31."""
    if word[0] == '$':
        return True
    return word in [str(number) for number in range(32)]
def isdirective(line):
    """Return True when *line* is an assembler directive (starts with '.')."""
    first_char = line[0]
    return first_char == '.'
def getlinelabelandinstruction(line):
    """Split *line* into (label, instruction).

    The label (text before ':') is lowercased; a line without a colon
    yields an empty label and the whole stripped line as the instruction.
    """
    label, sep, instruction = [part.strip() for part in line.partition(':')]
    if sep == '' and instruction == '':
        # No ':' found: partition() put the whole line into `label`.
        label, instruction = instruction, label
    return (label.lower(), instruction)
def definelabels(linesplittedcode):
    """First pass: record each label's instruction index in global `labels`."""
    global labels
    index = -1
    for raw_line in linesplittedcode:
        # Blank lines are ignored entirely.
        if raw_line.strip() == '':
            continue
        index += 1
        label, instruction = getlinelabelandinstruction(raw_line)
        if label != '':
            # Labels must not collide with mnemonics/registers, nor start
            # with a digit (that would look like an immediate).
            assert label not in reservedwords
            assert label[0] not in '0123456789'
            labels[label] = index
        # A label-only line does not occupy an instruction slot.
        if instruction == '':
            index -= 1
def mipsminiassembler(startaddr, code):
global labels
labels = dict()
lines = map(str.strip, code.split('\n'))
# Primera pasada para buscar las etiquetas
definelabels(lines)
# Segunda pasada para ensamblar
i = -1
for line in lines:
# Saltear lineas en blanco
if line.strip() == '':
continue
i = i + 1
label, instruction = getlinelabelandinstruction(line)
if instruction == '':
i = i - 1
continue
operation, operands = map(str.lower, re.split('\s+', instruction, 1))
operands = map(str.strip, operands.split(','))
if operation in typeRinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[2]]
rd = regname2number[operands[0]]
funct = operation2funct[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b000000{:05b}{:05b}{:05b}00000{:06b}; // {}".format(i, rs, rt, rd,funct, line)
elif operation in typeIinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeBRANCHinstructions:
rs = regname2number[operands[0]]
rt = regname2number[operands[1]]
if not isinmm(operands[2]):
# Then is a label. Convert it to offset from PC + 4
operands[2] = str((labels[operands[2].lower()]*4+startaddr - (i*4 + 4)) >> 2)
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeMEMinstructions:
rt = regname2number[operands[0]]
offset,rs = map(lambda e: e.replace(')', ''), operands[1].split('('))
if not isinmm(offset):
offset = str(labels[offset.lower()]*4+startaddr)
rs = regname2number[rs]
inmmediate = int(offset) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeJinstructions:
opcode = instruction2opcode[operation]
if not isinmm(operands[0]):
operands[0] = str(labels[operands[0].lower()]*4+startaddr)
address = (int(operands[0]) >> 2) & 0x3FFFFFF
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:026b}; // {}".format(i, opcode, address, line)
elif operation in typeSHIFTinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
shiftamount = operands[2]
assert(isinmm(shiftamount))
shiftamount = int(shiftamount)
assert(shiftamount >= 0)
funct = operation2funct[operation]
for j in range(shiftamount):
if j == 0:
# La primera vez, shiftear el registro pedido
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rs, rt, funct, line)
else:
# El resto de las veces shiftear el registro "resultado" para que los shifts sean acumulativos
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rt, rt, funct, line)
i = i + 1
i = i - 1
else:
assert(False)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Uso: mipsas.py archivo.asm"
exit(1)
filename = sys.argv[1]
if not os.path.isfile(filename):
print "No se encuentra el archivo {}".format(filename)
exit(1)
f = open(filename, 'r')
filecontent = f.read()
f.close()
mipsminiassembler(0, filecontent)
| Python |
#!/usr/bin/python
import sys
import os
import re
typeRinstructions = ['add', 'sub', 'and', 'or', 'nor', 'slt', 'jr']
typeIinstructions = ['addi', 'ori', 'lui']
typeBRANCHinstructions = ['beq']
typeMEMinstructions = ['lw', 'sw']
typeJinstructions = ['j', 'jal']
typeSHIFTinstructions = ['sll', 'srl']
typePseudoInstructions = ['li']
instruction2opcode = {
'ori': 0xd,
'lui': 0xf,
'addi': 0x8,
'beq': 0x4,
'lw': 0x23,
'sw': 0x2b,
'j': 0x2,
'jal': 0x3,
}
operation2funct = {
'add': 0x20,
'sub': 0x22,
'and': 0x24,
'or': 0x25,
'nor': 0x27,
'slt': 0x2a,
'sll': 0x00,
'srl': 0x02,
'jr': 0x08,
}
regname2number = {
'$zero': 0,
'$at': 1,
'$v0': 2,
'$v1': 3,
'$a0': 4,'$a1': 5,'$a2': 6,'$a3': 7,
'$t0': 8,'$t1': 9,'$t2': 10,'$t3': 11,'$t4': 12,'$t5': 13,'$t6': 14,'$t7': 15,
'$s0': 16,'$s1': 17,'$s2': 18,'$s3': 19,'$s4': 20,'$s5': 21,'$s6': 22,'$s7': 23,
'$t8': 24,'$t9': 25,
'$k0': 26,'$k1': 27,
'$gp': 28,
'$sp': 29,
'$fp': 30,
'$ra': 31,
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '10': 10, '11': 11, '12': 12, '13': 13, '14': 14, '15': 15,
'16': 16, '17': 17, '18': 18, '19': 19, '20': 20, '21': 21, '22': 22, '23': 23, '24': 24, '25': 25, '26': 26, '27': 27, '28': 28, '29': 29, '30': 30, '31': 31,
}
# Every mnemonic and register name that must not be used as a label.
# Pseudo-instructions are reserved too: otherwise a label named 'li' would
# silently shadow the pseudo-op during expansion.  (The .keys() list
# concatenation is Python 2 only.)
reservedwords = typeRinstructions + typeIinstructions + typeJinstructions + typeMEMinstructions + instruction2opcode.keys() + operation2funct.keys() + regname2number.keys() + typeBRANCHinstructions + typeSHIFTinstructions + typePseudoInstructions
# Mapping label name -> instruction index, filled in by definelabels().
labels = dict()
def isinmm(word):
    """Return True when *word* starts with a decimal digit (an immediate)."""
    return word[0] in '0123456789'
def isreg(word):
    """Return True when *word* names a register: '$'-prefixed or a bare 0-31."""
    if word[0] == '$':
        return True
    return word in [str(number) for number in range(32)]
def isdirective(line):
    """Return True when *line* is an assembler directive (starts with '.')."""
    first_char = line[0]
    return first_char == '.'
def getlinelabelandinstruction(line):
    """Split *line* into (label, instruction).

    The label (text before ':') is lowercased; a line without a colon
    yields an empty label and the whole stripped line as the instruction.
    """
    label, sep, instruction = [part.strip() for part in line.partition(':')]
    if sep == '' and instruction == '':
        # No ':' found: partition() put the whole line into `label`.
        label, instruction = instruction, label
    return (label.lower(), instruction)
def definelabels(linesplittedcode):
    """First pass: record each label's instruction index in global `labels`."""
    global labels
    index = -1
    for raw_line in linesplittedcode:
        # Blank lines are ignored entirely.
        if raw_line.strip() == '':
            continue
        index += 1
        label, instruction = getlinelabelandinstruction(raw_line)
        if label != '':
            # Labels must not collide with mnemonics/registers, nor start
            # with a digit (that would look like an immediate).
            assert label not in reservedwords
            assert label[0] not in '0123456789'
            labels[label] = index
        # A label-only line does not occupy an instruction slot.
        if instruction == '':
            index -= 1
def goFirstForPseudoInstructions(linesplittedcode):
    """Pass zero: expand pseudo-instructions in the line list, in place.

    Only 'li' is handled: it becomes the real pair 'lui $at, high16'
    followed by 'ori rd, $at, low16'.  The list is mutated while being
    iterated; that is safe here because one element is replaced and the
    second is inserted right after the current position, so the iterator
    simply visits the freshly inserted 'ori' line next.
    """
    #global labels
    i = -1
    for line in linesplittedcode:
        i = i + 1
        # Skip blank lines.
        if line.strip() == '':
            continue
        label, instruction = getlinelabelandinstruction(line)
        # Skip label-only lines.
        if instruction == '':
            continue
        operation, operands = map(str.lower, re.split('\s+', instruction, 1))
        operands = map(str.strip, operands.split(','))
        if operation in typePseudoInstructions:
            if operation == 'li':
                # Split the immediate into its high and low 16-bit halves.
                # NOTE(review): for negative immediates `high` is negative;
                # the second pass masks with 0xFFFF so the emitted lui/ori
                # still encode -- confirm for boundary values.
                imm = int(operands[1])
                high=imm >> 16
                low=imm & 0x0000FFFF
                line1 = "lui $at, "+str(high)
                # Any label on the original line sticks to the first
                # replacement instruction.
                if label != '':
                    line1 = label+":\t"+line1
                line2 = "ori "+operands[0]+", $at, "+str(low)
                linesplittedcode.pop(i)
                linesplittedcode.insert(i,line1)
                linesplittedcode.insert(i+1,line2)
def mipsminiassembler(startaddr, code):
global labels
labels = dict()
lines = map(str.strip, code.split('\n'))
#print lines
# Pasada cero para pseudoINSTRUCCIONES
goFirstForPseudoInstructions(lines)
#print lines
#os._exit(1)
# Primera pasada para buscar las etiquetas
definelabels(lines)
# Segunda pasada para ensamblar
i = -1
for line in lines:
# Saltear lineas en blanco
if line.strip() == '':
continue
i = i + 1
label, instruction = getlinelabelandinstruction(line)
if instruction == '':
i = i - 1
continue
operation, operands = map(str.lower, re.split('\s+', instruction, 1))
operands = map(str.strip, operands.split(','))
if operation in typeRinstructions:
if len(operands) == 1:
rs = regname2number[operands[0]]
rt = regname2number["$zero"]
rd = regname2number["$zero"]
else:
rs = regname2number[operands[1]]
rt = regname2number[operands[2]]
rd = regname2number[operands[0]]
funct = operation2funct[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b000000{:05b}{:05b}{:05b}00000{:06b}; // {}".format(i, rs, rt, rd,funct, line)
elif operation in typeIinstructions:
#print operands
if len(operands) < 3:
rs = regname2number["$zero"]
rt = regname2number[operands[0]]
inmmediate = int(operands[1]) & 0xFFFF # No more than 16 bits
elif len(operands) == 3:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeBRANCHinstructions:
rs = regname2number[operands[0]]
rt = regname2number[operands[1]]
if not isinmm(operands[2]):
# Then is a label. Convert it to offset from PC + 4
operands[2] = str((labels[operands[2].lower()]*4+startaddr - (i*4 + 4)) >> 2)
inmmediate = int(operands[2]) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeMEMinstructions:
rt = regname2number[operands[0]]
offset,rs = map(lambda e: e.replace(')', ''), operands[1].split('('))
if not isinmm(offset):
offset = str(labels[offset.lower()]*4+startaddr)
rs = regname2number[rs]
inmmediate = int(offset) & 0xFFFF # No more than 16 bits
opcode = instruction2opcode[operation]
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:05b}{:05b}{:016b}; // {}".format(i, opcode, rs, rt, inmmediate, line)
elif operation in typeJinstructions:
opcode = instruction2opcode[operation]
if not isinmm(operands[0]):
operands[0] = str(labels[operands[0].lower()]*4+startaddr)
address = (int(operands[0]) >> 2) & 0x3FFFFFF
print "uut.DPuniciclo.IMemory[{}] = 32'b{:06b}{:026b}; // {}".format(i, opcode, address, line)
elif operation in typeSHIFTinstructions:
rs = regname2number[operands[1]]
rt = regname2number[operands[0]]
shiftamount = operands[2]
assert(isinmm(shiftamount))
shiftamount = int(shiftamount)
assert(shiftamount >= 0)
funct = operation2funct[operation]
for j in range(shiftamount):
if j == 0:
# La primera vez, shiftear el registro pedido
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rs, rt, funct, line)
else:
# El resto de las veces shiftear el registro "resultado" para que los shifts sean acumulativos
print "uut.DPuniciclo.IMemory[{}] = 32'b00000000000{:05b}{:05b}00001{:06b}; // {}".format(i, rt, rt, funct, line)
i = i + 1
i = i - 1
else:
assert(False)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Uso: mipsas.py archivo.asm"
exit(1)
filename = sys.argv[1]
if not os.path.isfile(filename):
print "No se encuentra el archivo {}".format(filename)
exit(1)
f = open(filename, 'r')
filecontent = f.read()
f.close()
mipsminiassembler(0, filecontent)
| Python |
import libtcodpy as libtcod
# Maximum number of items the player can carry.
INVENTORY_SIZE = 9
# Screen dimensions, in character cells.
SCREEN_W = 80
SCREEN_H = 25
# Playfield (map) dimensions.
MAP_W = 59
MAP_H = 18
# Height of the message buffer area.
BUFFER_H = 5
# Selectable fonts as (image path, libtcod layout flag); cycled with F10.
FONTS = [
('fonts/terminal10x18.png',
libtcod.FONT_LAYOUT_ASCII_INROW),
('fonts/terminal8x15.png',
libtcod.FONT_LAYOUT_ASCII_INROW),
('fonts/terminal8x8.png',
libtcod.FONT_LAYOUT_ASCII_INCOL),
]
VERSION = '0.1'
# Window title shown by the game.
TITLE = 'featurecreeper v' + VERSION
TITLE_TEXT = [
'',
(libtcod.dark_green, 'by Jesse Scherer <scherer.jesse@gmail.com>'),
'',
(libtcod.dark_green, 'based heavily upon Madness'),
(libtcod.dark_green, 'by hmp <humpolec@gmail.com>'),
]
TITLE_THROBBER = [
(libtcod.white, 'Press ENTER to continue'),
]
TITLE_SPLASH = [
(libtcod.light_green, " __ _ "),
(libtcod.light_green, " / _| ___ __ _| |_ _ _ _ __ ___ ___ _ __ ___ ___ _ __ ___ _ __ "),
(libtcod.light_green, "| |_ / _ \/ _` | __| | | | '__/ _ \/ __| '__/ _ \/ _ \ '_ \ / _ \ '__|"),
(libtcod.light_green, "| _| __/ (_| | |_| |_| | | | __/ (__| | | __/ __/ |_) | __/ | "),
(libtcod.light_green, "|_| \___|\__,_|\__|\__,_|_| \___|\___|_| \___|\___| .__/ \___|_| "),
(libtcod.light_green, " |_| "),
]
HELP_TEXT = '''\
--- featurecreeper - a roguelike ---
Your task is simple: reach level 10 of the dungeon and defeat the evil
Dungeon Master. Be careful - not all of what you see is real. And not all of
what isn't real is harmless...
You start with a single candle in your backpack. Use it well.
--- Keybindings ---
Move: numpad, Inventory: i
arrow keys, Pick up: g, ,
yuhjklbn Drop: d
Wait: 5, . Descend: >
Look: x, ; Ascend: <
Help: ? Change font: F10
Quit: q, Esc Wizard Menu: W
[Press any key to continue]'''
# Fallback glyph for things without a defined appearance.
UNKNOWN_GLYPH = '?', libtcod.red
# Hard caps on character statistics.
MAX_SANITY = 100
MAX_SPEED = 5
MIN_SPEED = -4
# Maximum character (experience) level and dungeon depth.
MAX_CLEVEL = 6
MAX_DLEVEL = 10
# Equipment slot letter -> human-readable description of the slot.
INVENTORY_SLOTS = {
    'w': 'wielded',
    'l': 'carried as light source',
    'a': 'being worn',
    'b': 'being worn',
}
| Python |
from sys import stdout
# Print a 16x16 table of byte values 0-255: printable ASCII (32-127) as
# itself, every other value as '.'.
for row in range(16):
    rendered = ''.join(
        chr(code) if 32 <= code < 128 else '.'
        for code in range(16 * row, 16 * row + 16)
    )
    stdout.write(rendered)
    stdout.write('\n')
| Python |
import libtcodpy as T
from settings import *
from util import *
from random import choice
import ui
class Item(object):
    """Base class for everything that can lie on the floor or be carried.

    Concrete subclasses are collected into Item.ALL by the Register
    metaclass (from util); classes marked ABSTRACT are skipped.
    """
    ALL = []
    __metaclass__ = Register  # Python 2 metaclass hook
    ABSTRACT = True
    common = 10           # relative spawn weight
    glyph = UNKNOWN_GLYPH
    slot = None           # equipment slot letter, None = not equippable
    speed = 0             # speed modifier while equipped
    armor = 0             # armor modifier while equipped
    plural = False        # True when the name reads as plural ("boots")

    @property
    def descr(self):
        """Name followed by the stat-modifier suffix."""
        return self.name + self.mod_descr

    @property
    def a(self):
        """Description preceded by 'a'/'an' for singular items."""
        if self.plural:
            return self.descr
        article = 'an ' if self.descr[0].lower() in 'aeiou' else 'a '
        return article + self.descr

    @property
    def mod_descr(self):
        """'(+N speed) (+N armor)'-style suffix; empty when unmodified."""
        pieces = []
        if self.speed != 0:
            pieces.append(' (%s%d speed)' % ('+' if self.speed > 0 else '', self.speed))
        if self.armor != 0:
            pieces.append(' (%s%d armor)' % ('+' if self.armor > 0 else '', self.armor))
        return ''.join(pieces)

    def on_equip(self, player):
        """Apply this item's modifiers to the wearer's stats."""
        player.speed += self.speed
        player.armor += self.armor

    def on_unequip(self, player):
        """Undo on_equip."""
        player.speed -= self.speed
        player.armor -= self.armor

    def on_use(self, player):
        """Default use action: report that the item cannot be used."""
        ui.message('You don\'t know how to use %s.' % self.descr)
class LightSource(Item):
    """Equippable light: occupies the 'l' slot, burns down over `turns`
    turns and widens the carrier's light radius by `light_range`.
    """
    ABSTRACT = True
    slot = 'l'

    @property
    def descr(self):
        """Name, annotated with the remaining-fuel percentage once lit."""
        if self.turns_left == self.turns:
            base = self.name
        else:
            percent = 100*self.turns_left/self.turns
            base = '%s (%s%%)' % (self.name, percent)
        return base + self.mod_descr

    def __init__(self):
        super(LightSource, self).__init__()
        # Fresh light sources start with a full fuel reserve.
        self.turns_left = self.turns

    def on_equip(self, player):
        """Widen the player's light radius while equipped."""
        player.change_light_range(self.light_range)

    def on_unequip(self, player):
        """Undo on_equip."""
        player.change_light_range(-self.light_range)
class Weapon(Item):
    """Wieldable weapon; damage dice get random bonuses at creation time."""
    ABSTRACT = True
    slot = 'w'
    common = 7

    def __init__(self):
        super(Weapon, self).__init__()
        # 1-in-4 weapons get a random damage bonus, and of those another
        # 1-in-4 also get extra damage dice.
        if roll(1, 4) == 1:
            num, sides, bonus = self.dice
            bonus += roll(2, 4, -3)
            if roll(1, 4) == 1:
                sides += roll(2, 2)
            self.dice = num, sides, bonus

    @property
    def descr(self):
        """Name plus dice notation, e.g. 'dagger (1d4)'."""
        return '%s (%s)%s' % (self.name, describe_dice(*self.dice), self.mod_descr)
class Armor(Item):
    """Base class for wearable armor pieces."""
    ABSTRACT = True
    def __init__(self):
        super(Armor, self).__init__()
        # 1-in-5 pieces get a random armor bonus of 2d2-2 (0..2).
        if roll(1, 5) == 1:
            self.armor = self.armor + roll(2, 2, -2)
class Boots(Armor):
    """Footwear; occupies the 'b' slot and has a plural name."""
    ABSTRACT = True
    slot = 'b'
    plural = True
class Mail(Armor):
    """Body armor; occupies the 'a' slot."""
    ABSTRACT = True
    slot = 'a'
class Potion(Item):
    """Single-use drinkable item; removed from the inventory on use."""
    ABSTRACT = True
    def on_use(self, player):
        ui.message('You drink the %s.' % self.name)
        player.items.remove(self)
##### LIGHT SOURCES
class Match(LightSource):
    """A tiny 5-turn light source.  ("Because I am evil.")"""
    name = 'match'
    glyph = ']', T.orange
    level = 1
    turns = 5
    light_range = 1
    def on_unequip(self, player):
        super(Match, self).on_unequip(player)
        if (self.turns_left > 0):
            # Unequipped early: the player deliberately discards the match.
            ui.message('You just throw the match away')
            player.lose(self)
        else:
            # Burnt out: extinguish() already dealt with it.
            pass
class MatchBook(LightSource):
    """A 10-turn light source that burns the careless.  ("Even more evil.")"""
    name = 'match book'
    glyph = ']', T.light_grey
    level = 2
    turns = 10
    light_range = 2
    def on_unequip(self, player):
        super(MatchBook, self).on_unequip(player)
        if(self.turns_left > 0):
            # The player explicitly unequipped a still-burning matchbook.
            player.lose(self)
        else:
            # The matchbook ran down and was extinguish()ed: punish.
            ui.message('The matchbook burns your fingers as it burns down')
            player.hp -= roll(1, 6)
            # should not need to explicitly lose
            # player.lose(self)
class Candle(LightSource):
    """A small 50-turn light source; effectively a weaker torch."""
    name = 'candle'
    glyph = ']', T.white
    level = 1
    turns = 50
    light_range = 4
class Torch(LightSource):
    """The standard 180-turn light source."""
    name = 'torch'
    glyph = '/', T.dark_orange
    level = 1
    turns = 180
    light_range = 6
class Lamp(LightSource):
    """The best light source: 300 turns, widest radius."""
    name = 'lamp'
    glyph = ']', T.yellow
    level = 3
    turns = 300
    light_range = 10
###### WEAPONS
class Dagger(Weapon):
    """Basic 1d4 starter blade."""
    name = 'dagger'
    glyph = ')', T.light_grey # changed for Nethackishness
    dice = 1, 4, 0
    level = 1
class Stick(Weapon):
    """Improvised 1d3+1 weapon."""
    name = 'stick'
    glyph = '/', T.dark_orange # if a torch is a wand, so is this
    dice = 1, 3, 1
    level = 1
class ShortSword(Weapon):
    """1d6 sidearm."""
    name = 'short sword'
    glyph = ')', T.light_grey # changed for Nethackishness
    dice = 1, 6, 0
    level = 2
class HandAxe(Weapon):
    """1d6+1 axe."""
    name = 'hand axe'
    glyph = ')', T.grey # changed for Nethackishness
    dice = 1, 6, 1
    level = 2
class Spear(Weapon):
    """1d8+2 polearm."""
    name = 'spear'
    glyph = ')', T.light_sky # changed for Nethackishness
    dice = 1, 8, 2
    level = 3
class LongSword(Weapon):
    """1d10 sword."""
    name = 'long sword'
    glyph = ')', T.cyan # changed for Nethackishness
    dice = 1, 10, 0
    level = 3
class TwoSword(Weapon):
    """2d8 two-hander; costs 1 speed."""
    name = 'two-handed sword'
    glyph = ')', T.light_grey # changed for Nethackishness
    dice = 2, 8, 0
    speed = -1
    level = 4
class Halberd(Weapon):
    """3d6 polearm; costs 1 speed."""
    name = 'halberd'
    glyph = ')', T.light_grey # changed for Nethackishness
    dice = 3, 6, 0
    speed = -1
    level = 4
class EterniumSword(Weapon):
    """Top-tier 4d6 blade; rarer than average (common=5 vs Weapon's 7)."""
    name = 'eternium sword'
    glyph = ')', T.white # changed for Nethackishness
    dice = 4, 6, 0
    level = 5
    common = 5
##### BOOTS
class LightBoots(Boots):
    """+1 armor footwear with no speed penalty."""
    name = 'light boots'
    slot = 'b'  # redundant: already inherited from Boots
    glyph = '[', T.dark_orange
    armor = 1
    level = 2
class HeavyBoots(Boots):
    """+2 armor footwear; costs 1 speed."""
    name = 'heavy boots'
    glyph = '[', T.light_grey
    armor = 2
    speed = -1
    level = 3
class BootsSpeed(Boots):
    """+3 speed footwear; rare (common=5)."""
    name = 'boots of speed'
    glyph = '[', T.light_blue
    speed = 3
    level = 5
    common = 5
##### ARMOR
class UglyClothes(Mail):
    """+1 armor starting outfit."""
    name = 'ugly clothes'
    plural = True
    glyph = '[', T.green
    armor = 1
    level = 1
class RingMail(Mail):
    """+3 armor body armor; costs 1 speed."""
    name = 'ring mail'
    glyph = '[', T.grey
    armor = 3
    speed = -1
    level = 3
    common = 8
class PlateMail(Mail):
    """+6 armor body armor; costs 2 speed; uncommon."""
    name = 'plate mail'
    glyph = '[', T.white
    armor = 6
    speed = -2
    level = 5
    common = 6
##### POTIONS
class PotionSanity(Potion):
    """Restores the player's sanity when drunk."""
    glyph = '!', T.blue
    name = 'potion of sanity'
    level = 3
    common = 10
    def on_use(self, player):
        super(PotionSanity, self).on_use(player)
        player.restore_sanity()
class PotionHealing(Potion):
    """Heals the player to full hit points when drunk."""
    glyph = '!', T.green
    name = 'potion of health'
    level = 2
    def on_use(self, player):
        super(PotionHealing, self).on_use(player)
        ui.message('You feel healed.')
        player.hp = player.max_hp
        #player.sanity = min(player.sanity+15, MAX_SANITY)
class PotionLevel(Potion):
    """Grants an experience level when drunk.

    Marked ABSTRACT, so presumably excluded from the random item pool --
    confirm against Register's handling of ABSTRACT in util.
    """
    ABSTRACT = True
    glyph = '!', T.yellow
    name = 'potion of gain level'
    def on_use(self, player):
        super(PotionLevel, self).on_use(player)
        player.advance()
class PotionLight(Potion):
    """Permanently widens the player's light radius by 1.

    Marked ABSTRACT, so presumably excluded from the random item pool --
    confirm against Register's handling of ABSTRACT in util.
    """
    ABSTRACT = True
    glyph = '!', T.white
    name = 'potion of light'
    def on_use(self, player):
        super(PotionLight, self).on_use(player)
        ui.message('You feel yourself begin to glow')
        player.change_light_range(1)
class PotionVictory(Potion):
    """Instantly wins the game when drunk.

    Marked ABSTRACT, so presumably excluded from the random item pool --
    confirm against Register's handling of ABSTRACT in util.
    """
    ABSTRACT = True
    glyph = '!', T.violet
    name = 'potion of victory'
    def on_use(self, player):
        super(PotionVictory, self).on_use(player)
        player.won = True
if __name__ == '__main__':
    # Smoke test: print 20 random level-10 item descriptions.
    d = [random_by_level(10, Item.ALL)().descr for i in range(20)]
    print '\n'.join(d)
| Python |
from random import randrange, choice, shuffle
import libtcodpy as T
# from corpse import Corpse
from mob import Monster, UnrealMonster, Player, Boss
from item import Item
from mapgen import generate_map
from settings import *
from util import *
import corpse
import ui
class Map(object):
def __init__(self, level):
self.tiles = generate_map(level)
self.level = level
self.player = None
self.mobs = []
self.fov_map = T.map_new(MAP_W, MAP_H)
for x in range(MAP_W):
for y in range(MAP_H):
tile = self.tiles[x][y]
T.map_set_properties(self.fov_map,
x, y,
tile.transparent,
tile.walkable)
self.populate()
if self.level == MAX_DLEVEL:
self.place_monsters(Boss)
    def __del__(self):
        # Free the underlying libtcod map object.
        # NOTE(review): __del__ is defined twice in this class; this first
        # definition is silently overridden by the later, identical one.
        T.map_delete(self.fov_map)
def find_tile(self, func):
for x in range(MAP_W):
for y in range(MAP_H):
tile = self.tiles[x][y]
if func(tile):
return (x, y, tile)
def recalc_fov(self):
T.map_compute_fov(self.fov_map,
self.player.x, self.player.y,
MAP_W,
True)
for x in range(MAP_W):
for y in range(MAP_H):
if self.is_visible(x, y):
self.tiles[x][y].remember_glyph()
def is_visible(self, x, y):
return T.map_is_in_fov(self.fov_map, x, y) and \
distance(x, y, self.player.x, self.player.y) <= \
self.player.fov_range
def neighbor_tiles(self, x, y):
for dx, dy in ALL_DIRS:
if in_map(x+dx, y+dy):
yield self.tiles[x+dx][y+dy]
    def __del__(self):
        # Free the underlying libtcod map object.
        # NOTE(review): duplicate of the __del__ defined earlier in this
        # class; one of the two should be removed.
        T.map_delete(self.fov_map)
def do_turn(self, t):
for mob in self.mobs:
mob.heartbeat()
if mob.speed < 0 and \
t%(6+max(mob.speed, MIN_SPEED)) == 0:
continue
mob.act()
if mob.speed > 0 and \
t%(6-min(mob.speed, MAX_SPEED)) == 0:
mob.act()
def populate(self):
n_monsters = 3 + roll(2, self.level)
n_items = roll(2, 4, 1)
n_corpses = roll(self.level, 6)
for i in range(n_monsters):
mcls = random_by_level(self.level, Monster.ALL)
self.place_monsters(mcls)
for i in range(n_items):
x, y, tile = self.random_empty_tile(no_mob=False, no_stair=True)
item = random_by_level(self.level, Item.ALL)()
tile.items.append(item)
for i in range(n_corpses):
mon = random_by_level(self.level, Monster.ALL)
# Roll for older corpses deeper down
cps_age = roll(2 * self.level, 20)
x, y, tile = self.random_empty_tile(no_mob=False, no_stair=True)
cps = corpse.Corpse(src_mon = mon, age = cps_age)
tile.corpses.append(cps)
def flood(self, x, y, mcls, n):
if n == 0:
return n
if x < 0 or x >= MAP_W or y < 0 or y >= MAP_H:
return n
tile = self.tiles[x][y]
if tile.mob or not tile.walkable:
return n
mcls().put(self, x, y)
n -= 1
dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]
shuffle(dirs)
for dx, dy in dirs:
n = self.flood(x+dx, y+dy, mcls, n)
return n
def place_monsters(self, mcls, *args, **kwargs):
x, y, tile = self.random_empty_tile(*args, **kwargs)
self.flood(x, y, mcls, mcls.multi)
def random_empty_tile(self, no_mob=True, not_seen=False, no_stair=False):
while True:
x, y = randrange(MAP_W), randrange(MAP_H)
tile = self.tiles[x][y]
if not tile.walkable:
continue
if no_mob and tile.mob:
continue
if not_seen and self.is_visible(x, y):
continue
if no_stair and isinstance(tile, StairDownTile):
continue
return (x, y, tile)
def flip_playfield_vert(self):
"""
Flips the map up-and-down.
a...b c...d
..... -> .....
c...d a...b
"""
new_tiles = array(MAP_W, MAP_H, lambda: ' ')
for x in range(MAP_W):
# ui.message('attempting x = %d' % x)
for y in range(MAP_H):
# ui.message('attempting y = %d' % (MAP_H - y -1))
new_tiles[x][y] = self.tiles[x][MAP_H - y - 1]
T.map_set_properties(self.fov_map,
x, y,
new_tiles[x][y].transparent,
new_tiles[x][y].walkable)
for mon in self.mobs:
mon.y = MAP_H - mon.y - 1
self.tiles = new_tiles
self.recalc_fov()
ui.draw_all()
def flip_playfield_horiz(self):
"""
Flips the map right-to-left.
a...b b...a
..... -> .....
c...d d...c
"""
new_tiles = array(MAP_W, MAP_H, lambda: ' ')
for x in range(MAP_W):
for y in range(MAP_H):
new_tiles[x][y] = self.tiles[MAP_W - x -1][y]
T.map_set_properties(self.fov_map,
x, y,
new_tiles[x][y].transparent,
new_tiles[x][y].walkable)
for mon in self.mobs:
mon.x = MAP_W - mon.x - 1
self.tiles = new_tiles
self.recalc_fov()
ui.draw_all()
def invert_playfield(self):
"""
Flips the map across X and Y axes:
a...b d...c
..... -> .....
c...d b...a
"""
new_tiles = array(MAP_W, MAP_H, lambda: ' ')
for x in range(MAP_W):
for y in range(MAP_H):
new_tiles[x][y] = self.tiles[MAP_W - x - 1][MAP_H - y - 1]
T.map_set_properties(self.fov_map,
x, y,
new_tiles[x][y].transparent,
new_tiles[x][y].walkable)
for mon in self.mobs:
mon.x = MAP_W - mon.x - 1
mon.y = MAP_H - mon.y - 1
self.tiles = new_tiles
self.recalc_fov()
ui.draw_all()
def flip_playfield_main(self):
"""
Flips the map along the main diagonal (top left to bottom right).
Not currently feasible under nonsquare maps.
a...b a...c
..... -> .....
c...d b...d
"""
pass
def flip_playfield_minor(self):
"""
Flips the map along the minor diagonal (top right to bottom left).
Not currently feasible under nonsquare maps.
a...b d...b
..... -> .....
c...d c...a
"""
pass
def rotate_playfield_cw(self):
"""
Rotates the map clockwise.
Not currently feasible under nonsquare maps.
a...b c...a
..... -> .....
c...d d...b
"""
pass
def rotate_playfield_ccw(self):
"""
Rotates the map counterclockwise.
Not currently feasible under nonsquare maps.
a...b b...d
..... -> .....
c...d a...c
"""
pass
class Tile(object):
    """A single map cell: terrain flags plus whatever sits on it."""
    walkable = True
    transparent = True
    glyph = UNKNOWN_GLYPH
    known_glyph = ' ', T.white  # what the player remembers seeing here
    def __init__(self):
        self.mob = None
        self.items = []
        self.corpses = []
    @property
    def visible_glyph(self):
        # Display priority: creature, then top item, then top corpse,
        # then the bare terrain glyph.
        if self.mob:
            return self.mob.glyph
        if self.items:
            return self.items[-1].glyph
        if self.corpses:
            return self.corpses[-1].glyph
        return self.glyph
    def remember_glyph(self):
        # Map memory records items but not mobs or corpses.
        self.known_glyph = self.items[-1].glyph if self.items else self.glyph
    def forget_glyph(self):
        self.known_glyph = ' ', T.white
    def on_enter(self):
        # Hook for tile types that react when the player steps on them.
        pass
class FloorTile(Tile):
    # Plain walkable dungeon floor.
    name = 'floor'
    walkable = True
    transparent = True
    glyph = '.', T.grey
class WallTile(Tile):
    # Solid rock: blocks movement and sight.
    name = 'stone wall'
    walkable = False
    transparent = False
    glyph = '#', T.grey
class WoodWallTile(Tile):
    # Room wall ('#' carved by the generator); blocks movement and sight.
    name = 'wooden wall'
    walkable = False
    transparent = False
    glyph = '#', T.dark_orange
class StairUpTile(Tile):
    # Ascending staircase.
    name = 'stairs up'
    walkable = True
    transparent = True
    glyph = '<', T.light_grey
class StairDownTile(Tile):
    # Descending staircase; announces itself when stepped on.
    name = 'stairs down'
    walkable = True
    transparent = True
    glyph = '>', T.light_grey
    def on_enter(self):
        ui.message('There is a down stairway here.')
| Python |
from random import randrange
from sys import stdout
import libtcodpy as T
from settings import *
from util import *
def array_to_tiles(arr):
    """Convert a 2D character array into a 2D array of Tile instances."""
    import map
    # Legend: generator characters -> tile classes.
    legend = {
        '.': map.FloorTile,
        '#': map.WoodWallTile,
        ' ': map.WallTile,
        '>': map.StairDownTile,
        '<': map.StairUpTile,
    }
    result = []
    for column in arr:
        result.append([legend[ch]() for ch in column])
    return result
def try_put_room(arr, w, h):
    """Try to place a w-by-h room at a random position.

    Returns (x1, y1, w, h) on success, or None when the footprint
    overlaps anything already carved.
    """
    x1 = randrange(MAP_W - w)
    y1 = randrange(MAP_H - h)
    # The whole footprint must still be untouched rock.
    for col in range(x1, x1 + w):
        for row in range(y1, y1 + h):
            if arr[col][row] != ' ':
                return None
    # Carve the floor...
    for col in range(x1, x1 + w):
        for row in range(y1, y1 + h):
            arr[col][row] = '.'
    # ...then outline it with room-wall characters.
    for col in range(x1, x1 + w):
        arr[col][y1] = '#'
        arr[col][y1 + h - 1] = '#'
    for row in range(y1, y1 + h):
        arr[x1][row] = '#'
        arr[x1 + w - 1][row] = '#'
    return (x1, y1, w, h)
def print_array(arr):
    """Dump a column-major character array to stdout, one row per line."""
    height = len(arr[0])
    for row in range(height):
        stdout.write(''.join(column[row] for column in arr))
        stdout.write('\n')
def generate_map(level):
    """
    The following is courtesy hmp in the thread
    'The incredible power of Dijkstra maps'
    on rec.games.roguelike.development
    wherein he explains what's going on here:
    Start with Wall squares everywhere.
    Place N non-overlapping rooms randomly, putting Floor and RoomWall
    squares on the map.
    For each pair of rooms (1,2), (2,3) .. (N-1, N):
    Find a path between that two rooms, assigning different costs to
    different square types.
    Carve that path into the map as a new corridor (Floor).
    Now, depending on how you assign costs to (Floor, RoomWall, Wall)
    squares:
    Low Floor, medium Wall, high RoomWall cost = nice interconnected
    dungeon.
    Very high Wall cost = as few cells carved as possible, lots of reused
    corridors, possibly very roundabout paths.
    High Floor cost = no reuse, so lots of redundant corridors, lots of
    double-spaced corridors.
    Low RoomWall cost = irregular, cave-like rooms because of walls
    getting "eaten" in some places.
    """
    arr = array(MAP_W, MAP_H, lambda: ' ')
    rooms = []
    # 500 random placement attempts; overlapping tries simply fail.
    for i in xrange(500):  # NOTE: Python 2 (xrange)
        w, h = randrange(5,15), randrange(5,10)
        room = try_put_room(arr, w, h)
        if room:
            rooms.append(room)
    if level > 1:
        # until we can escape the dungeon
        # should this be placed under the avatar's starting position?
        randomly_place(arr, '<')
    if level < MAX_DLEVEL:
        randomly_place(arr, '>')
    # Cost triple is (untouched rock ' ', room wall '#', floor),
    # selected by depth band.  NOTE: Python 2 integer division yields
    # an index in 0..2 here.
    costs = [(5, 40, 1),
             (5, 1, 2),
             (5, 40, 40)][3*level/(MAX_DLEVEL+1)]
    def corridor_path_func(x1, y1, x2, y2, data):
        # libtcod path callback: cost of stepping onto (x2, y2).
        # A cost of 0 marks the map border as impassable.
        if x2 == 0 or x2 == MAP_W-1 or y2 == 0 or y2 == MAP_H-1:
            return 0
        c = arr[x2][y2]
        if c == ' ':
            return costs[0]
        elif c == '#':
            return costs[1]
        else:
            return costs[2]
    path = T.path_new_using_function(
        MAP_W, MAP_H, corridor_path_func, None, 0.0)
    def connect(x1, y1, x2, y2):
        # Carve the computed path into floor.
        T.path_compute(path, x1, y1, x2, y2)
        for i in range(T.path_size(path)):
            x, y = T.path_get(path, i)
            c = arr[x][y]
            if c == '#' or c == ' ':
                arr[x][y] = '.'
    # Chain consecutive room centers together with corridors.
    for i in range(len(rooms)-1):
        x1, y1, w1, h1 = rooms[i]
        x2, y2, w2, h2 = rooms[i+1]
        connect(x1+w1/2, y1+h1/2, x2+w2/2, y2+h2/2)
    #print_array(arr)
    T.path_delete(path)
    return array_to_tiles(arr)
def randomly_place(arr, c):
    """Overwrite a random floor ('.') cell with the character c."""
    spot = random_empty_space(arr)
    arr[spot[0]][spot[1]] = c
def random_empty_space(arr):
    """Sample random coordinates until one lands on floor ('.')."""
    while True:
        x = randrange(MAP_W)
        y = randrange(MAP_H)
        if arr[x][y] == '.':
            return (x, y)
| Python |
#!/usr/bin/python
import sys
import settings
from game import Game
if __name__ == '__main__':
    # Passing 'wizard' anywhere on the command line enables debug mode.
    wizard = 'wizard' in sys.argv
    Game(wizard).play()
| Python |
from distutils.core import setup
import py2exe
# Run 'python win_setup.py py2exe'
# py2exe build options.  Fix: the original dict listed "excludes" twice;
# duplicate dict keys silently override earlier ones, so the first
# ["_ssl"] list was dead code.  Merged into a single list (the effective
# runtime value is unchanged).
opts = {
    "py2exe": {
        "ascii": True,
        "compressed": True,
        # Standard-library modules excluded to shrink the bundle.
        "excludes": ['_ssl',
                     'pyreadline', 'difflib', 'doctest', 'locale',
                     'optparse', 'pickle', 'calendar'],
        'bundle_files': 1,
    }
}
setup(options=opts, windows=['creeper.py'], zipfile=None)
| Python |
import libtcodpy as T
from settings import *
from util import *
from random import choice
# from mob import Monster
import mob
import ui
class Corpse(object):
    """A dead creature lying on a tile.

    Built either generically or from a source monster (which supplies
    the name template and glyph color); age is derived from the global
    turn counter rather than stored directly.
    """
    turn_created = 0
    glyph = '%', T.dark_grey
    plural = False
    name = 'unidentifiable corpse'
    @property
    def descr(self):
        return self.name
    @property
    def age(self):
        # Turns elapsed since this corpse came into existence.
        return ui.GAME.turns - self.turn_created
    @property
    def a(self):
        # The description with an indefinite article, unless plural.
        if self.plural:
            return self.descr
        first = self.descr[0].lower()
        prefix = 'an ' if first in 'aeiuo' else 'a '
        return prefix + self.descr
    def __init__(self, src_mon=None, age=None):
        """Create a corpse, optionally derived from src_mon and pre-aged."""
        cps_names = ('dead %s', 'cadaverous %s', 'perished %s',
                     'deceased %s', 'lifeless %s', 'wasted %s',
                     '%s corpse', '%s cadaver', '%s body')
        if src_mon:
            self.name = choice(cps_names) % src_mon.name
            try:
                self.glyph = '%', src_mon.glyph[1]
            except TypeError:
                # Rainbow glyphs cannot be subscripted; keep the default
                # corpse color instead.
                pass
        if age:
            # Backdate creation so the age property reports `age`.
            self.turn_created = ui.GAME.turns - age
        else:
            # No age given: a fresh corpse.
            self.turn_created = ui.GAME.turns
    def describe_corpse(self, player):
        """Build an age- and sanity-dependent adjective phrase."""
        adjectives = []
        turns_old = self.age
        if turns_old < 10:
            adjectives.append(choice(('very fresh', 'bloody', 'twitching')))
        elif turns_old < 50:
            adjectives.append(choice(('fresh', 'warm', 'trickling',
                                      'weeping')))
        elif turns_old < 100:
            adjectives.append(choice(('stiff', 'cold', 'oozing', 'slimy')))
        elif turns_old < 200:
            adjectives.append(choice(('decaying', 'putrid', 'rotting',
                                      'moldy', 'foul', 'fetid', 'noisome')))
        else:
            adjectives.append(choice(('rotten', 'worm-eaten', 'corrupt',
                                      'rancid')))
        if 'h' in player.effects:
            adjectives.append(choice(('captivating', 'delicious', 'adorable',
                                      'tempting', 'bewitching',
                                      'enchanting')))
        if 'f' in player.effects:
            adjectives.append(choice(('terrifying', 'awful', 'ghastly',
                                      'hideous', 'striking', 'imposing')))
        return ', '.join(adjectives) + ' ' + self.descr
| Python |
from __future__ import with_statement
import libtcodpy as T
from settings import *
from map import *
from mob import *
from item import *
from util import in_map
import ui
# Key bindings.  Each entry pairs a list of accepted keys with either a
# bare command name (dispatched as Game.cmd_<name>) or a (name, args)
# tuple whose args are splatted into the handler.
KEYS = [
    (['y', '7'], ('walk', (-1, -1))),
    (['k', '8', T.KEY_UP], ('walk', (0, -1))),
    (['u', '9'], ('walk', (1, -1))),
    (['h', '4', T.KEY_LEFT], ('walk', (-1, 0))),
    (['.', '5'], 'wait'),
    (['l', '6', T.KEY_RIGHT], ('walk', (1, 0))),
    (['b', '1'], ('walk', (-1, 1))),
    (['j', '2', T.KEY_DOWN], ('walk', (0, 1))),
    (['n', '3'], ('walk', (1, 1))),
    (['q', T.KEY_ESCAPE], 'quit'),
    (['g', ','], 'pick_up'),
    (['i'], 'inventory'),
    (['d'], 'drop'),
    (['>'], 'descend'),
    (['<'], 'ascend'),
    (['x', ';'], 'look'),
    (['?'], 'help'),
    ([T.KEY_F10], 'cycle_font'),
    (['W'], 'wizard'),
    # Map-distortion debug bindings, currently disabled:
    # (['Z'], 'flip_horiz'),
    # (['X'], 'flip_vert'),
    # (['C'], 'invert')
]
def decode_key(key):
    """Look up `key` in KEYS; return its command, or None when unbound."""
    for bindings, command in KEYS:
        if key in bindings:
            return command
    return None
class Quit(Exception):
    """Sentinel exception used to unwind cleanly out of the main loop."""
    pass
class Game(object):
    """Top-level controller: owns the player, the stack of dungeon maps,
    the turn counter and the main input loop."""
    def __init__(self, wizard):
        # wizard: enables debug commands and lets the player refuse death.
        self.wizard = wizard
    def play(self):
        """Run one complete session: UI up, title screen, game, UI down."""
        ui.init(self)
        ui.title_screen()
        self.start()
        self.loop()
        ui.close()
    def start(self):
        """Create a fresh player and dungeon state, then enter level 1."""
        self.player = Player(self.wizard)
        self.turns = 0
        # One slot per dungeon level; levels are generated on first visit.
        self.map_stack = [None for i in range(MAX_DLEVEL)]
        ui.message('Welcome to featurecreeper!')
        ui.message('Press ? for help.')
        self.start_map(1)
    def start_map(self, level):
        """Enter dungeon level `level`, shelving the current map and
        restoring (with memory decay and possible distortion) any
        previously visited one."""
        # No map exists yet on the very first turn, so nothing to shelve.
        # (Fixed: was `self.turns is not 0`, an identity comparison
        # against an int literal.)
        if self.turns != 0:
            self.map.last_turn = self.turns
            self.map_stack[self.map.level - 1] = self.map
        if not self.map_stack[level - 1]:
            self.map = Map(level)
        else:
            self.map = self.map_stack[level - 1]
            # Forget remembered tiles in proportion to time spent away...
            time_gone = self.turns - self.map.last_turn
            for i in range(time_gone):
                x, y = randrange(MAP_W), randrange(MAP_H)
                self.map.tiles[x][y].forget_glyph()
            # ...and maybe distort the remembered layout.  (Local renamed
            # from `choice`, which shadowed random.choice pulled in by the
            # wildcard imports above.)
            distortion = roll(1, 4)
            if distortion == 4:
                self.map.invert_playfield()
            elif distortion == 3:
                self.map.flip_playfield_horiz()
            elif distortion == 2:
                self.map.flip_playfield_vert()
        x, y, _ = self.map.random_empty_tile()
        self.player.put(self.map, x, y)
    def loop(self):
        """Main loop: handle death/victory, read commands, advance time."""
        ui.draw_all()
        try:
            while True:
                if self.player.death:
                    if self.wizard:
                        # Wizards may refuse to die.
                        if ui.prompt('Die? [yn]', 'yn') == 'n':
                            ui.new_ui_turn()
                            self.player.resurrect()
                            ui.draw_all()
                            continue
                    ui.prompt(
                        '[Game over: %s. Press ENTER]' % self.player.death,
                        [T.KEY_ENTER, T.KEY_KPENTER])
                    self.save_character_dump()
                    raise Quit()
                if self.player.won:
                    self.congratulate_player()
                    self.save_character_dump()
                    raise Quit()
                # Consume the player's action points, then let the world
                # take its turn.
                while self.player.action_turns > 0:
                    key = ui.readkey()
                    self.do_command(key)
                self.map.do_turn(self.turns)
                self.turns += 1
                ui.draw_all()
        except Quit:
            pass
    def do_command(self, key):
        """Decode `key` and dispatch to the matching cmd_* method."""
        cmd = decode_key(key)
        if cmd is None:
            return
        ui.new_ui_turn()
        if isinstance(cmd, str):
            getattr(self, 'cmd_'+cmd)()
        else:
            name, args = cmd
            getattr(self, 'cmd_'+name)(*args)
        ui.draw_all()
    def cmd_walk(self, dx, dy):
        self.player.walk(dx, dy)
    def cmd_wait(self):
        self.player.wait()
    def cmd_pick_up(self):
        """Pick up items from the player's tile, prompting on stacks."""
        tile = self.player.tile
        if tile.items == []:
            if tile.corpses:
                ui.message('You cannot bring yourself to pick up a corpse.')
            else:
                ui.message('There is nothing here to pick up.')
        elif len(tile.items) == 1:
            self.player.pick_up(tile.items[0])
        else:
            # Keep prompting until the stack is empty or the player
            # cancels.  (Fixed: redundant `while True and tile.items`.)
            while tile.items:
                item = ui.select_item('Select an item to pick up',
                                      tile.items)
                if item:
                    self.player.pick_up(item)
                    ui.draw_all()
                else:
                    break
    def cmd_drop(self):
        item = ui.select_item('Select an item to drop', self.player.items)
        if item:
            self.player.drop(item)
    def cmd_inventory(self):
        item = ui.select_item('Select an item to use', self.player.items)
        if item:
            self.player.use(item)
    def cmd_ascend(self):
        """Climb an up stairway (costs a turn)."""
        if not isinstance(self.player.tile, StairUpTile):
            ui.message('Stand on an up stairway to ascend.')
            return
        ui.message('You climb upwards...')
        self.player.remove()
        self.turns += 1
        self.start_map(self.map.level-1)
    def cmd_descend(self):
        """Climb a down stairway (costs a turn)."""
        if not isinstance(self.player.tile, StairDownTile):
            ui.message('Stand on a down stairway to descend.')
            return
        ui.message('You climb downwards...')
        self.player.remove()
        self.turns += 1
        self.start_map(self.map.level+1)
    def cmd_quit(self):
        if ui.prompt('Quit? [yn]', 'yn') == 'y':
            raise Quit()
        else:
            ui.new_ui_turn()
    def cmd_cycle_font(self):
        ui.cycle_font()
    def cmd_help(self):
        ui.help_screen()
    def cmd_wizard(self):
        # Debug shortcut: wizards dive one level instantly.
        if self.wizard and self.map.level < MAX_DLEVEL:
            self.player.remove()
            self.start_map(self.map.level+1)
    def cmd_look(self):
        ui.look_mode()
    def cmd_flip_vert(self):
        self.map.flip_playfield_vert()
    def cmd_flip_horiz(self):
        self.map.flip_playfield_horiz()
    def cmd_invert(self):
        self.map.invert_playfield()
    def congratulate_player(self):
        """Victory message, flavored by the player's active insanities."""
        effects = self.player.effects
        if 'r' in effects:
            ui.prompt(
                'You are the Dungeon Master now. Press ENTER',
                [T.KEY_ENTER, T.KEY_KPENTER])
        elif 'f' in effects:
            ui.prompt(
                'It will all be okay now. Press ENTER',
                [T.KEY_ENTER, T.KEY_KPENTER])
        elif 'd' in effects:
            ui.prompt(
                'The darkness finally recedes. Press ENTER',
                [T.KEY_ENTER, T.KEY_KPENTER])
        elif 'h' in effects:
            # NOTE: the line continuation embeds literal spaces in this
            # message; preserved as-is.
            ui.prompt(
                'You have had such fun, you decide to stay. \
                Press ENTER',
                [T.KEY_ENTER, T.KEY_KPENTER])
        else:
            ui.prompt(
                'Congratulations! You have won. Press ENTER',
                [T.KEY_ENTER, T.KEY_KPENTER])
    def save_character_dump(self):
        """Write character.txt: map snapshot, recent messages, status and
        inventory.  Best-effort: IOErrors are deliberately swallowed."""
        from datetime import datetime
        try:
            with open('character.txt', 'w') as f:
                f.write('%s - character dump\n\n' % TITLE)
                f.write(datetime.strftime(datetime.now(), '%d/%m/%Y %H:%M')+'\n\n')
                f.write(' MAP\n\n')
                for y in range(MAP_H):
                    for x in range(MAP_W):
                        tile = self.map.tiles[x][y]
                        if tile.mob and not isinstance(tile.mob, UnrealMonster):
                            c, _ = tile.mob.glyph
                        elif tile.items:
                            c, _ = tile.items[-1].glyph
                        elif all(not t.transparent
                                 for t in self.map.neighbor_tiles(x, y)):
                            # Rock fully surrounded by rock prints blank.
                            c = ' '
                        else:
                            c, _ = tile.glyph
                        f.write(c)
                    f.write('\n')
                f.write('\n LAST MESSAGES\n\n')
                for _, s, _ in ui.MESSAGES[-10:]:
                    f.write(s+'\n')
                f.write('\n STATUS\n\n')
                f.write('\n'.join(ui.status_lines()))
                f.write('\n\n INVENTORY\n\n')
                for item in self.player.items:
                    f.write('%s%s\n' %
                            ('*' if self.player.has_equipped(item) else ' ',
                             item.descr))
                if self.player.effects:
                    f.write('\n INSANITY\n\n')
                    for eff in self.player.effects.values():
                        f.write(eff.long_descr + '\n')
        except IOError:
            pass
| Python |
from random import randrange, choice
import libtcodpy as T
from settings import *
# The eight compass offsets around a cell, excluding (0, 0).
ALL_DIRS = [(dx, dy)
            for dx in range(-1, 2)
            for dy in range(-1, 2)
            if (dx, dy) != (0, 0)]
def distance(x1, y1, x2, y2):
    """Roguelike distance: cardinal steps cost 1, diagonals cost 1.5."""
    dx, dy = abs(x2 - x1), abs(y2 - y1)
    return (dx + dy + max(dx, dy)) / 2
def sgn(a):
    """Sign of a: -1, 0 or +1."""
    return (a > 0) - (a < 0)
def dir_towards(x1, y1, x2, y2):
    """Unit step (dx, dy) moving (x1, y1) toward (x2, y2)."""
    return (sgn(x2 - x1), sgn(y2 - y1))
# I want to be able to go *away* too
def dir_away(x1, y1, x2, y2):
    """Unit step (dx, dy) moving (x1, y1) directly away from (x2, y2)."""
    return (sgn(x1 - x2), sgn(y1 - y2))
# Roll AdB+C
def roll(a, b, c=0):
    """Roll `a` dice of `b` sides each and add the modifier `c`."""
    total = c
    for _ in range(a):
        total += randrange(1, b + 1)
    return total
def describe_dice(a, b, c):
    """Format dice notation 'AdB', appending '+C' or '-C' when c != 0."""
    base = '%dd%d' % (a, b)
    if c > 0:
        return base + '+%d' % c
    if c < 0:
        return base + '-%d' % (-c)
    return base
def random_by_level(level, items):
    # Pick a random class from `items`, weighted by each class's
    # `common` attribute and restricted to a depth-appropriate band of
    # `level` values.
    if roll(1, 200) == 1: # out of depth
        # Rare (0.5%) chance to ignore depth and allow stronger picks.
        max_level = 100
        min_level = 3
    else:
        max_level = level/2+roll(1,2)  # NOTE: Python 2 integer division
        min_level = 1 #roll(1, max(1, max_level-1))
    # NOTE(review): relies on Python 2 filter() returning a list -- the
    # result is iterated twice (sum below, then the loop); a Python 3
    # port must wrap this in list().  Also raises if no item qualifies.
    items = filter(lambda a: min_level <= a.level <= max_level, items)
    # Weighted selection: n falls into some item's `common` bucket.
    n = randrange(sum(item.common for item in items))
    for item in items:
        if n < item.common:
            return item
        else:
            n -= item.common
    # Fallback; unreachable when the weights are consistent.
    return choice(items)
def array(w, h, func):
    """Build a w-by-h 2D list (indexed [x][y]), each cell from func()."""
    return [[func() for _ in range(h)] for _ in range(w)]
def in_map(x, y):
    """True when (x, y) lies inside the map bounds."""
    return 0 <= x < MAP_W and 0 <= y < MAP_H
class Register(type):
    # Metaclass: every class created without an ABSTRACT flag in its own
    # dict appends itself to the shared ALL list of its hierarchy,
    # enabling random_by_level() to pick from all concrete subclasses.
    def __new__(mcs, name, bases, dict):
        cls = type.__new__(mcs, name, bases, dict)
        if not dict.get('ABSTRACT'):
            cls.ALL.append(cls)
        return cls
# Palette cycled through for hallucinatory 'rainbow' glyphs.
RAINBOW_COLORS = [T.red, T.blue, T.green, T.yellow,
                  T.pink, T.white]
def rainbow_glyph(c, remember=True):
    # Factory for a `glyph` property showing char c in a random rainbow
    # color.  remember=False re-rolls the color on every read (flicker);
    # remember=True rolls once and caches it on the instance (in an
    # attribute also named 'rainbow_glyph' -- same as this function).
    if not remember:
        return property(lambda self: (c, choice(RAINBOW_COLORS)))
    @property
    def glyph(self):
        if not hasattr(self, 'rainbow_glyph'):
            self.rainbow_glyph = c, choice(RAINBOW_COLORS)
        return self.rainbow_glyph
    return glyph
| Python |
from random import choice, randrange, shuffle
from util import *
from mob import *
import ui
class InsaneEffect(object):
    # Base class for sanity-driven afflictions.  Subclasses set `letter`
    # (unique key into player.effects), optional add/remove messages, a
    # `long_descr` for the character dump, and override do_effect().
    letter = '?'
    add_message = ''
    remove_message = ''
    long_descr = '<description>'
    def add(self, player):
        # Attach to `player`; an effect may be active at most once.
        self.player = player
        assert self.letter not in player.effects
        player.effects[self.letter] = self
        if self.add_message:
            ui.message(self.add_message, T.yellow)
    def remove(self):
        # Detach from the player, announcing it if a message is set.
        assert self.letter in self.player.effects
        del self.player.effects[self.letter]
        if self.remove_message:
            ui.message(self.remove_message, T.yellow)
    def do_effect(self, severity):
        # Hook: apply one dose of the effect; severity scales intensity.
        pass
class Hallu(InsaneEffect):
    # Base for hallucination effects: spawns imaginary monsters while
    # active and despawns all unreal mobs when cured.  Subclasses set
    # `exclude` (letter of the mutually exclusive effect) and
    # `monsters_from` (class whose ALL registry supplies the spawns).
    sanity_range = 80, 55
    def add(self, player):
        # Refuse to stack with the mutually exclusive hallucination.
        if self.exclude not in player.effects:
            super(Hallu, self).add(player)
    def do_effect(self, severity):
        super(Hallu, self).do_effect(severity)
        m = self.player.map
        # One imaginary monster group per severity point, off-screen.
        for i in range(severity):
            mcls = random_by_level(m.level, self.monsters_from.ALL)
            m.place_monsters(mcls, not_seen=True)
    def remove(self):
        super(Hallu, self).remove()
        # Remove every imaginary (not real) monster on the current map.
        for mon in filter(lambda m: not m.real,
                self.player.map.mobs):
            mon.remove()
class Happy(Hallu):
    # Euphoric hallucinations; cannot coexist with fear of the dark.
    letter = 'h'
    exclude = 'd'
    add_message = 'The colors around you suddenly seem more intense.'
    remove_message = 'The colors return to normal.'
    long_descr = 'You are hallucinating.'
    monsters_from = HappyMonster
class Dark(Hallu):
    # Fear of the dark; cannot coexist with happy hallucinations.  The
    # add message depends on whether the player currently carries light.
    letter = 'd'
    exclude = 'h'
    remove_message = 'The darkness is calm once again.'
    long_descr = 'You are afraid of the dark.'
    monsters_from = DarkMonster
    @property
    def add_message(self):
        if self.player.light_range > 0:
            return 'The light suddenly becomes dim.'
        else:
            return 'You notice strange shapes in the darkness.'
class Real(InsaneEffect):
    # Passive effect: imaginary monsters can deal real damage.  Checked
    # elsewhere via "'r' in player.effects"; no per-turn behavior here.
    sanity_range = 40, 30
    letter = 'r'
    long_descr = 'Figments of your imagination can hurt you.'
class Fear(InsaneEffect):
    # Panic attacks; flagged effect only, consulted elsewhere via
    # "'f' in player.effects".
    sanity_range = 30, 5
    letter = 'f'
    add_message = 'You suddenly feel very scared.'
    remove_message = 'You feel more confident.'
    long_descr = 'You are prone to panic attacks.'
class MapFlip(InsaneEffect):
    """Directional confusion: mirrors or inverts the playfield."""
    sanity_range = 60, 15
    letter = 'm'
    add_message = 'You lose your sense of direction.'
    remove_message = 'You are no longer so turned around.'
    long_descr = 'You have become confused about directions.'
    def do_effect(self, severity):
        super(MapFlip, self).do_effect(severity)
        current_map = self.player.map
        # Severity ranges up to about 8 in the extreme; mild attacks
        # mirror a single axis, anything stronger flips both.
        if severity in (1, 3):
            current_map.flip_playfield_vert()
        elif severity in (2, 4):
            current_map.flip_playfield_horiz()
        else:
            current_map.invert_playfield()
class Amnesia(InsaneEffect):
    """Forgetfulness: erases random remembered tiles from the map."""
    sanity_range = 100, 50
    letter = 'a'
    add_message = 'You feel forgetful.'
    remove_message = 'You start to remember things again.'
    long_descr = 'You are forgetting things you have seen.'
    def do_effect(self, severity):
        super(Amnesia, self).do_effect(severity)
        current_map = self.player.map
        # Truly random tiles are hit -- visible or in-wall included.
        for _ in range(current_map.level * severity):
            col, row = randrange(MAP_W), randrange(MAP_H)
            current_map.tiles[col][row].forget_glyph()
# All effect classes eligible for random application.
EFFECTS = [Dark, Happy, Real, Fear, Amnesia, MapFlip]
def add_insane_effects(player):
    # Possibly attach new effects based on the player's sanity.
    # NOTE(review): shuffle() reorders the module-level EFFECTS list in
    # place; harmless here, but shuffling a copy would be cleaner.
    shuffle(EFFECTS)
    for ecls in EFFECTS:
        if ecls.letter in player.effects:
            continue  # already active
        upper, lower = ecls.sanity_range
        if player.sanity > upper:
            continue  # sanity too high for this effect
        elif player.sanity <= lower:
            ecls().add(player)  # below threshold: always applied
        else: # in range
            # Within the band: 1-in-20 chance per check.
            if roll(1, 20) == 1:
                ecls().add(player)
| Python |
#
# libtcod 1.5.0 python wrapper
# Copyright (c) 2008 Jice
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * The name of Jice may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Jice ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Jice BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import ctypes
from ctypes import *
try: #import NumPy if available
    import numpy
    numpy_available = True
except ImportError:
    numpy_available = False
# Load the platform-appropriate shared library from the working
# directory: 32-bit then 64-bit .so on Linux, the MinGW DLL elsewhere.
if sys.platform.find('linux') != -1:
    try:
        _lib = ctypes.cdll['./libtcod.so']
    except OSError:
        _lib = ctypes.cdll['./libtcod64.so']
else:
    _lib = ctypes.cdll['./libtcod-mingw.dll']
# Library version identifiers (libtcod 1.5.0).
HEXVERSION = 0x010500
STRVERSION = "1.5.0"
TECHVERSION = 0x01050003
############################
# color module
############################
class Color(Structure):
    # ctypes mirror of TCOD_color_t: three 8-bit channel bitfields.
    _fields_ = [('r', c_uint, 8),
                ('g', c_uint, 8),
                ('b', c_uint, 8),
                ]
    def __init__(self, r=0,g=0,b=0):
        self.r = r
        self.g = g
        self.b = b
    def __eq__(self, c):
        # NOTE(review): __eq__ without __ne__ (this is Python 2 code) or
        # __hash__; '!=' falls back to identity comparison -- confirm no
        # caller relies on != between Colors.
        return (self.r == c.r) and (self.g == c.g) and (self.b == c.b)
    def __mul__(self, c):
        # Color * Color or Color * scalar, delegated to the C library.
        iret=0
        if isinstance(c,Color):
            iret=_lib.TCOD_color_multiply_wrapper(col_to_int(self), col_to_int(c))
        else:
            iret=_lib.TCOD_color_multiply_scalar_wrapper(col_to_int(self), c_float(c))
        return int_to_col(iret)
    def __add__(self, c):
        # Saturating channel-wise add, delegated to the C library.
        iret=_lib.TCOD_color_add_wrapper(col_to_int(self), col_to_int(c))
        return int_to_col(iret)
    def __sub__(self, c):
        # Saturating channel-wise subtract, delegated to the C library.
        iret=_lib.TCOD_color_subtract_wrapper(col_to_int(self), col_to_int(c))
        return int_to_col(iret)
def int_to_col(i):
    """Unpack a 0xRRGGBB integer into a Color."""
    c = Color()
    c.r = (i >> 16) & 0xFF
    c.g = (i >> 8) & 0xFF
    c.b = i & 0xFF
    return c
def col_to_int(c):
    """Pack a color's r/g/b channels into a single 0xRRGGBB integer."""
    return (int(c.r) << 16) | (c.g << 8) | c.b
# default colors
# grey levels
black=Color(0,0,0)
darker_grey=Color(31,31,31)
dark_grey=Color(63,63,63)
grey=Color(128,128,128)
light_grey=Color(191,191,191)
darker_gray=Color(31,31,31)
dark_gray=Color(63,63,63)
gray=Color(128,128,128)
light_gray=Color(191,191,191)
white=Color(255,255,255)
#standard colors
red=Color(255,0,0)
orange=Color(255,127,0)
yellow=Color(255,255,0)
chartreuse=Color(127,255,0)
green=Color(0,255,0)
sea=Color(0,255,127)
cyan=Color(0,255,255)
sky=Color(0,127,255)
blue=Color(0,0,255)
violet=Color(127,0,255)
magenta=Color(255,0,255)
pink=Color(255,0,127)
# dark colors
dark_red=Color(127,0,0)
dark_orange=Color(127,63,0)
dark_yellow=Color(127,127,0)
dark_chartreuse=Color(63,127,0)
dark_green=Color(0,127,0)
dark_sea=Color(0,127,63)
dark_cyan=Color(0,127,127)
dark_sky=Color(0,63,127)
dark_blue=Color(0,0,127)
dark_violet=Color(63,0,127)
dark_magenta=Color(127,0,127)
dark_pink=Color(127,0,63)
# darker colors
darker_red=Color(63,0,0)
darker_orange=Color(63,31,0)
darker_yellow=Color(63,63,0)
darker_chartreuse=Color(31,63,0)
darker_green=Color(0,63,0)
darker_sea=Color(0,63,31)
darker_cyan=Color(0,63,63)
darker_sky=Color(0,31,63)
darker_blue=Color(0,0,63)
darker_violet=Color(31,0,63)
darker_magenta=Color(63,0,63)
darker_pink=Color(63,0,31)
# light colors
light_red=Color(255,127,127)
light_orange=Color(255,191,127)
light_yellow=Color(255,255,127)
light_chartreuse=Color(191,255,127)
light_green=Color(127,255,127)
light_sea=Color(127,255,191)
light_cyan=Color(127,255,255)
light_sky=Color(127,191,255)
light_blue=Color(127,127,255)
light_violet=Color(191,127,255)
light_magenta=Color(255,127,255)
light_pink=Color(255,127,191)
# desaturated colors
desaturated_red=Color(127,63,63)
desaturated_orange=Color(127,95,63)
desaturated_yellow=Color(127,127,63)
desaturated_chartreuse=Color(95,127,63)
desaturated_green=Color(63,127,63)
desaturated_sea=Color(63,127,95)
desaturated_cyan=Color(63,127,127)
desaturated_sky=Color(63,95,127)
desaturated_blue=Color(63,63,127)
desaturated_violet=Color(95,63,127)
desaturated_magenta=Color(127,63,127)
desaturated_pink=Color(127,63,95)
# special
silver=Color(203,203,203)
gold=Color(255,255,102)
# color functions
def color_lerp(c1, c2, a):
    # Linear interpolation between two colors; a in [0, 1].
    iret = _lib.TCOD_color_lerp_wrapper(col_to_int(c1), col_to_int(c2), c_float(a))
    return int_to_col(iret)
def color_set_hsv(c, h, s, v):
    # Set c's channels from hue/saturation/value; mutates c in place
    # (hence byref).
    _lib.TCOD_color_set_HSV(byref(c), c_float(h), c_float(s), c_float(v))
def color_get_hsv(c):
    # Return (hue, saturation, value) for color c.
    h = c_float()
    s = c_float()
    v = c_float()
    _lib.TCOD_color_get_HSV(c, byref(h), byref(s), byref(v))
    return h.value, s.value, v.value
def color_gen_map(colors, indexes):
    # Build a gradient: colors[i] is pinned at position indexes[i] and
    # the C library interpolates everything in between.  Returns a list
    # of Color of length max(indexes) + 1.
    # NOTE(review): assumes len(indexes) == len(colors) (cindexes is
    # filled inside the colors loop) -- confirm at call sites.
    COLOR_ARRAY = c_ubyte * (3 * len(colors))
    IDX_ARRAY = c_int * len(indexes)
    ccolors = COLOR_ARRAY()
    cindexes = IDX_ARRAY()
    maxidx = 0
    for i in range(len(colors)):
        # Flatten each Color into 3 consecutive bytes.
        ccolors[3 * i] = colors[i].r
        ccolors[3 * i + 1] = colors[i].g
        ccolors[3 * i + 2] = colors[i].b
        cindexes[i] = c_int(indexes[i])
        if indexes[i] > maxidx:
            maxidx = indexes[i]
    RES_ARRAY = c_ubyte * (3 * (maxidx + 1))
    cres = RES_ARRAY()
    _lib.TCOD_color_gen_map(cres, len(colors), ccolors, cindexes)
    # Re-pack the flat byte buffer into Color objects.
    res = list()
    i = 0
    while i < 3 * (maxidx + 1):
        col = Color(cres[i], cres[i + 1], cres[i + 2])
        res.append(col)
        i += 3
    return res
############################
# console module
############################
class Key(Structure):
    # ctypes mirror of TCOD_key_t: virtual keycode, printable character
    # and pressed/modifier flags as bitfields.
    _fields_=[('vk', c_int, 32),
              ('c', c_int, 8),
              ('pressed', c_uint, 8),
              ('lalt', c_uint, 8),
              ('lctrl', c_uint, 8),
              ('ralt', c_uint, 8),
              ('rctrl', c_uint, 8),
              ('shift', c_uint, 8),
              ]
# Declare C return/argument types for functions whose defaults (int)
# would be wrong.
_lib.TCOD_console_wait_for_keypress.restype = Key
_lib.TCOD_console_check_for_keypress.restype = Key
_lib.TCOD_console_credits_render.restype = c_uint
_lib.TCOD_console_set_custom_font.argtypes=[c_char_p,c_int]
# background rendering modes (TCOD_bkgnd_flag_t values)
BKGND_NONE = 0
BKGND_SET = 1
BKGND_MULTIPLY = 2
BKGND_LIGHTEN = 3
BKGND_DARKEN = 4
BKGND_SCREEN = 5
BKGND_COLOR_DODGE = 6
BKGND_COLOR_BURN = 7
BKGND_ADD = 8
BKGND_ADDA = 9
BKGND_BURN = 10
BKGND_OVERLAY = 11
BKGND_ALPH = 12
def BKGND_ALPHA(a):
    """Alpha-blend flag: a in [0, 1] is scaled to a byte and packed
    into bits 8-15 alongside the base mode."""
    return BKGND_ALPH | (int(a * 255) << 8)
def BKGND_ADDALPHA(a):
    """Additive-alpha flag: a in [0, 1] is scaled to a byte and packed
    into bits 8-15 alongside the base mode."""
    return BKGND_ADDA | (int(a * 255) << 8)
# non blocking key events types
KEY_PRESSED = 1
KEY_RELEASED = 2
# key codes
KEY_NONE = 0
KEY_ESCAPE = 1
KEY_BACKSPACE = 2
KEY_TAB = 3
KEY_ENTER = 4
KEY_SHIFT = 5
KEY_CONTROL = 6
KEY_ALT = 7
KEY_PAUSE = 8
KEY_CAPSLOCK = 9
KEY_PAGEUP = 10
KEY_PAGEDOWN = 11
KEY_END = 12
KEY_HOME = 13
KEY_UP = 14
KEY_LEFT = 15
KEY_RIGHT = 16
KEY_DOWN = 17
KEY_PRINTSCREEN = 18
KEY_INSERT = 19
KEY_DELETE = 20
KEY_LWIN = 21
KEY_RWIN = 22
KEY_APPS = 23
KEY_0 = 24
KEY_1 = 25
KEY_2 = 26
KEY_3 = 27
KEY_4 = 28
KEY_5 = 29
KEY_6 = 30
KEY_7 = 31
KEY_8 = 32
KEY_9 = 33
KEY_KP0 = 34
KEY_KP1 = 35
KEY_KP2 = 36
KEY_KP3 = 37
KEY_KP4 = 38
KEY_KP5 = 39
KEY_KP6 = 40
KEY_KP7 = 41
KEY_KP8 = 42
KEY_KP9 = 43
KEY_KPADD = 44
KEY_KPSUB = 45
KEY_KPDIV = 46
KEY_KPMUL = 47
KEY_KPDEC = 48
KEY_KPENTER = 49
KEY_F1 = 50
KEY_F2 = 51
KEY_F3 = 52
KEY_F4 = 53
KEY_F5 = 54
KEY_F6 = 55
KEY_F7 = 56
KEY_F8 = 57
KEY_F9 = 58
KEY_F10 = 59
KEY_F11 = 60
KEY_F12 = 61
KEY_NUMLOCK = 62
KEY_SCROLLLOCK = 63
KEY_SPACE = 64
KEY_CHAR = 65
# special chars
# single walls
CHAR_HLINE = 196
CHAR_VLINE = 179
CHAR_NE = 191
CHAR_NW = 218
CHAR_SE = 217
CHAR_SW = 192
CHAR_TEEW = 180
CHAR_TEEE = 195
CHAR_TEEN = 193
CHAR_TEES = 194
CHAR_CROSS = 197
# double walls
CHAR_DHLINE = 205
CHAR_DVLINE = 186
CHAR_DNE = 187
CHAR_DNW = 201
CHAR_DSE = 188
CHAR_DSW = 200
CHAR_DTEEW = 185
CHAR_DTEEE = 204
CHAR_DTEEN = 202
CHAR_DTEES = 203
CHAR_DCROSS = 206
# blocks
CHAR_BLOCK1 = 176
CHAR_BLOCK2 = 177
CHAR_BLOCK3 = 178
# arrows
CHAR_ARROW_N = 24
CHAR_ARROW_S = 25
CHAR_ARROW_E = 26
CHAR_ARROW_W = 27
# arrows without tail
CHAR_ARROW2_N = 30
CHAR_ARROW2_S = 31
CHAR_ARROW2_E = 16
CHAR_ARROW2_W = 17
# double arrows
CHAR_DARROW_H = 29
CHAR_DARROW_V = 18
# GUI stuff
CHAR_CHECKBOX_UNSET = 224
CHAR_CHECKBOX_SET = 225
CHAR_RADIO_UNSET = 9
CHAR_RADIO_SET = 10
# sub-pixel resolution kit
CHAR_SUBP_NW = 226
CHAR_SUBP_NE = 227
CHAR_SUBP_N = 228
CHAR_SUBP_SE = 229
CHAR_SUBP_DIAG = 230
CHAR_SUBP_E = 231
CHAR_SUBP_SW = 232
# font flags
FONT_LAYOUT_ASCII_INCOL = 1
FONT_LAYOUT_ASCII_INROW = 2
FONT_TYPE_GREYSCALE = 4
FONT_TYPE_GRAYSCALE = 4
FONT_LAYOUT_TCOD = 8
# color control codes
COLCTRL_1=1
COLCTRL_2=2
COLCTRL_3=3
COLCTRL_4=4
COLCTRL_5=5
COLCTRL_NUMBER=5
COLCTRL_FORE_RGB=6
COLCTRL_BACK_RGB=7
COLCTRL_STOP=8
# initializing the console
def console_init_root(w, h, title, fullscreen=False):
    # Create the root console (the game window).
    _lib.TCOD_console_init_root(w, h, title, c_uint(fullscreen))
def console_get_width(con):
    return _lib.TCOD_console_get_width(con)
def console_get_height(con):
    return _lib.TCOD_console_get_height(con)
def console_set_custom_font(fontFile, flags=FONT_LAYOUT_ASCII_INCOL, nb_char_horiz=0, nb_char_vertic=0):
    _lib.TCOD_console_set_custom_font(fontFile, flags, nb_char_horiz, nb_char_vertic)
def console_map_ascii_code_to_font(asciiCode, fontCharX, fontCharY):
    # A one-character str is converted to its ordinal before crossing
    # into C; integers pass through unchanged.
    if type(asciiCode) == str:
        _lib.TCOD_console_map_ascii_code_to_font(ord(asciiCode), fontCharX,
                                                 fontCharY)
    else:
        _lib.TCOD_console_map_ascii_code_to_font(asciiCode, fontCharX,
                                                 fontCharY)
def console_map_ascii_codes_to_font(firstAsciiCode, nbCodes, fontCharX,
                                    fontCharY):
    if type(firstAsciiCode) == str:
        _lib.TCOD_console_map_ascii_codes_to_font(ord(firstAsciiCode), nbCodes,
                                                  fontCharX, fontCharY)
    else:
        _lib.TCOD_console_map_ascii_codes_to_font(firstAsciiCode, nbCodes,
                                                  fontCharX, fontCharY)
def console_map_string_to_font(s, fontCharX, fontCharY):
    _lib.TCOD_console_map_string_to_font(s, fontCharX, fontCharY)
def console_is_fullscreen():
    return _lib.TCOD_console_is_fullscreen() == 1
def console_set_fullscreen(fullscreen):
    _lib.TCOD_console_set_fullscreen(c_int(fullscreen))
def console_is_window_closed():
    return _lib.TCOD_console_is_window_closed() == 1
def console_set_window_title(title):
    _lib.TCOD_console_set_window_title(title)
def console_credits():
    _lib.TCOD_console_credits()
def console_credits_reset():
    _lib.TCOD_console_credits_reset()
def console_credits_render(x, y, alpha):
    return _lib.TCOD_console_credits_render(x, y, c_int(alpha)) == 1
def console_flush():
    # Push the root console's contents to the screen.
    _lib.TCOD_console_flush()
# drawing on a console
def console_set_background_color(con, col):
    _lib.TCOD_console_set_background_color(con, col)
def console_set_foreground_color(con, col):
    _lib.TCOD_console_set_foreground_color(con, col)
def console_clear(con):
    return _lib.TCOD_console_clear(con)
def console_put_char(con, x, y, c, flag=BKGND_SET):
    # `c` may be a one-character str (converted to its ordinal) or an
    # integer character code; same pattern in the helpers below.
    if type(c) == str:
        _lib.TCOD_console_put_char(con, x, y, ord(c), flag)
    else:
        _lib.TCOD_console_put_char(con, x, y, c, flag)
def console_put_char_ex(con, x, y, c, fore, back):
    if type(c) == str:
        _lib.TCOD_console_put_char_ex(con, x, y, ord(c), fore, back)
    else:
        _lib.TCOD_console_put_char_ex(con, x, y, c, fore, back)
def console_set_back(con, x, y, col, flag=BKGND_SET):
    _lib.TCOD_console_set_back(con, x, y, col, flag)
def console_set_fore(con, x, y, col):
    _lib.TCOD_console_set_fore(con, x, y, col)
def console_set_char(con, x, y, c):
    if type(c) == str:
        _lib.TCOD_console_set_char(con, x, y, ord(c))
    else:
        _lib.TCOD_console_set_char(con, x, y, c)
# Aligned string printing; the *_rect variants wrap within a box and
# return the number of lines used.
def console_print_left(con, x, y, bk, s):
    _lib.TCOD_console_print_left(con, x, y, bk, s)
def console_print_right(con, x, y, bk, s):
    _lib.TCOD_console_print_right(con, x, y, bk, s)
def console_print_center(con, x, y, bk, s):
    _lib.TCOD_console_print_center(con, x, y, bk, s)
def console_print_left_rect(con, x, y, w, h, bk, s):
    return _lib.TCOD_console_print_left_rect(con, x, y, w, h, bk, s)
def console_print_right_rect(con, x, y, w, h, bk, s):
    return _lib.TCOD_console_print_right_rect(con, x, y, w, h, bk, s)
def console_print_center_rect(con, x, y, w, h, bk, s):
    return _lib.TCOD_console_print_center_rect(con, x, y, w, h, bk, s)
def console_height_left_rect(con, x, y, w, h, s):
    return _lib.TCOD_console_height_left_rect(con, x, y, w, h, s)
def console_height_right_rect(con, x, y, w, h, s):
    return _lib.TCOD_console_height_right_rect(con, x, y, w, h, s)
def console_height_center_rect(con, x, y, w, h, s):
    return _lib.TCOD_console_height_center_rect(con, x, y, w, h, s)
def console_rect(con, x, y, w, h, clr, flag=BKGND_SET):
    _lib.TCOD_console_rect(con, x, y, w, h, c_int(clr), flag)
def console_hline(con, x, y, l):
_lib.TCOD_console_hline( con, x, y, l)
def console_vline(con, x, y, l):
_lib.TCOD_console_vline( con, x, y, l)
def console_print_frame(con, x, y, w, h, clr, bkflg, s):
_lib.TCOD_console_print_frame(con, x, y, w, h, c_int(clr), bkflg, s)
def console_set_color_control(con,fore,back) :
_lib.TCOD_console_set_color_control(con,fore,back)
def console_get_background_color(con):
iret=_lib.TCOD_console_get_background_color_wrapper(con)
return int_to_col(iret)
def console_get_foreground_color(con):
iret=_lib.TCOD_console_get_foreground_color_wrapper(con)
return int_to_col(iret)
def console_get_back(con, x, y):
iret=_lib.TCOD_console_get_back_wrapper(con, x, y)
return int_to_col(iret)
def console_get_fore(con, x, y):
iret=_lib.TCOD_console_get_fore_wrapper(con, x, y)
return int_to_col(iret)
def console_get_char(con, x, y):
return _lib.TCOD_console_get_char(con, x, y)
def console_set_fade(fade, fadingColor):
_lib.TCOD_console_set_fade_wrapper(fade, col_to_int(fadingColor))
def console_get_fade():
return _lib.TCOD_console_get_fade().value
def console_get_fading_color():
return int_to_col(_lib.TCOD_console_get_fading_color_wrapper())
# handling keyboard input
def console_wait_for_keypress(flush):
k=Key()
_lib.TCOD_console_wait_for_keypress_wrapper(byref(k),c_int(flush))
return k
def console_check_for_keypress(flags=KEY_RELEASED):
k=Key()
_lib.TCOD_console_check_for_keypress_wrapper(byref(k),c_int(flags))
return k
def console_is_key_pressed(key):
return _lib.TCOD_console_is_key_pressed(key) == 1
def console_set_keyboard_repeat(initial_delay, interval):
_lib.TCOD_console_set_keyboard_repeat(initial_delay, interval)
def console_disable_keyboard_repeat():
_lib.TCOD_console_disable_keyboard_repeat()
# using offscreen consoles
def console_new(w, h):
return _lib.TCOD_console_new(w, h)
def console_get_width(con):
return _lib.TCOD_console_get_width(con)
def console_get_height(con):
return _lib.TCOD_console_get_height(con)
def console_blit(src, x, y, w, h, dst, xdst, ydst, ffade=1.0,bfade=1.0):
_lib.TCOD_console_blit(src, x, y, w, h, dst, xdst, ydst, c_float(ffade), c_float(bfade))
def console_set_key_color(con, col):
_lib.TCOD_console_set_key_color(con, col)
def console_delete(con):
_lib.TCOD_console_delete(con)
# fast color filling
def console_fill_foreground(con,r,g,b) :
    """Fill the foreground color of every console cell in a single call.

    Args:
        con: console handle.
        r, g, b: per-cell color channels as three same-length sequences,
            either all plain Python lists or all NumPy arrays, row-major.

    Raises:
        TypeError: if r, g, b are not all lists / all NumPy arrays, or if
            their lengths differ.
    """
    # Fix: validate types and sizes up front, before any conversion work.
    # The original only checked the lengths after building the ctypes
    # arrays (or after the numpy dtype conversion), wasting that work on
    # inputs that were about to be rejected.
    if (isinstance(r, list) and isinstance(g, list) and isinstance(b, list)):
        use_numpy = False
    elif (numpy_available and isinstance(r, numpy.ndarray) and
          isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        use_numpy = True
    else:
        raise TypeError('R, G and B must all be of the same type (list or NumPy array)')
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')
    if use_numpy:
        #numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
        cg = g.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
        cb = b.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
    else:
        #simple python lists, convert using ctypes
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)
    _lib.TCOD_console_fill_foreground(con, cr, cg, cb)
def console_fill_background(con,r,g,b) :
    """Fill the background color of every console cell in a single call.

    Args:
        con: console handle.
        r, g, b: per-cell color channels as three same-length sequences,
            either all plain Python lists or all NumPy arrays, row-major.

    Raises:
        TypeError: if r, g, b are not all lists / all NumPy arrays, or if
            their lengths differ.
    """
    # Fix: validate types and sizes up front, before any conversion work
    # (mirrors the fix in console_fill_foreground).
    if (isinstance(r, list) and isinstance(g, list) and isinstance(b, list)):
        use_numpy = False
    elif (numpy_available and isinstance(r, numpy.ndarray) and
          isinstance(g, numpy.ndarray) and isinstance(b, numpy.ndarray)):
        use_numpy = True
    else:
        raise TypeError('R, G and B must all be of the same type (list or NumPy array)')
    if len(r) != len(g) or len(r) != len(b):
        raise TypeError('R, G and B must all have the same size.')
    if use_numpy:
        #numpy arrays, use numpy's ctypes functions
        r = numpy.ascontiguousarray(r, dtype=numpy.int_)
        g = numpy.ascontiguousarray(g, dtype=numpy.int_)
        b = numpy.ascontiguousarray(b, dtype=numpy.int_)
        cr = r.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
        cg = g.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
        cb = b.ctypes.data_as(ctypes.POINTER(ctypes.c_int))
    else:
        #simple python lists, convert using ctypes
        cr = (c_int * len(r))(*r)
        cg = (c_int * len(g))(*g)
        cb = (c_int * len(b))(*b)
    _lib.TCOD_console_fill_background(con, cr, cg, cb)
############################
# sys module
############################
_lib.TCOD_sys_get_last_frame_length.restype = c_float
_lib.TCOD_sys_elapsed_seconds.restype = c_float
# high precision time functions
def sys_set_fps(fps):
    """Cap the rendering loop at fps frames per second (0 = uncapped)."""
    _lib.TCOD_sys_set_fps(fps)
def sys_get_fps():
    return _lib.TCOD_sys_get_fps()
def sys_get_last_frame_length():
    """Seconds spent rendering the previous frame (float)."""
    return _lib.TCOD_sys_get_last_frame_length()
def sys_sleep_milli(val):
    _lib.TCOD_sys_sleep_milli(c_uint(val))
# NOTE: this 4-argument sys_update_char is dead code -- it is shadowed by the
# 6-argument definition further below (the later def wins at import time).
# It also shadows the builtin ascii().
def sys_update_char(ascii,img,x,y):
    _lib.TCOD_sys_update_char(c_uint(ascii),img,c_int(x),c_int(y))
def sys_elapsed_milli():
    return _lib.TCOD_sys_elapsed_milli()
def sys_elapsed_seconds():
    return _lib.TCOD_sys_elapsed_seconds()
# easy screenshots
def sys_save_screenshot(name=0):
    # name=0 lets the C side pick an auto-numbered filename.
    _lib.TCOD_sys_save_screenshot(name)
# custom fullscreen resolution
def sys_force_fullscreen_resolution(width, height):
    _lib.TCOD_sys_force_fullscreen_resolution(width, height)
def sys_get_current_resolution():
    """Return (width, height) of the current display resolution in pixels."""
    w = c_int()
    h = c_int()
    _lib.TCOD_sys_get_current_resolution(byref(w), byref(h))
    return w.value, h.value
def sys_get_char_size():
    """Return (width, height) of a single font character in pixels."""
    w = c_int()
    h = c_int()
    _lib.TCOD_sys_get_char_size(byref(w), byref(h))
    return w.value, h.value
# update font bitmap
def sys_update_char(asciiCode, fontx, fonty, img, x, y) :
    """Redraw the glyph for asciiCode from image img at font cell (fontx, fonty)."""
    _lib.TCOD_sys_update_char(c_int(asciiCode),c_int(fontx),c_int(fonty),img,c_int(x),c_int(y))
# custom SDL post renderer
# Globals keep the CFUNCTYPE wrapper alive; if it were garbage-collected the
# C side would call into freed memory.
SDL_RENDERER_FUNC = None
sdl_renderer_func = None
def sys_register_SDL_renderer(func):
    """Register func(sdl_surface_ptr) to run after each frame is rendered."""
    global SDL_RENDERER_FUNC
    global sdl_renderer_func
    SDL_RENDERER_FUNC = CFUNCTYPE(None, c_void_p)
    sdl_renderer_func = SDL_RENDERER_FUNC(func)
    _lib.TCOD_sys_register_SDL_renderer(sdl_renderer_func)
############################
# line module
############################
_lib.TCOD_line_step.restype = c_uint
_lib.TCOD_line.restype=c_uint
def line_init(xo, yo, xd, yd):
    """Start Bresenham iteration from (xo, yo) to (xd, yd); use line_step()."""
    _lib.TCOD_line_init(xo, yo, xd, yd)
def line_step():
    """Advance one cell; returns (x, y), or (None, None) when finished."""
    x = c_int()
    y = c_int()
    ret = _lib.TCOD_line_step(byref(x), byref(y))
    if not ret:
        return x.value, y.value
    return None,None
def line(xo,yo,xd,yd,py_callback) :
    """Walk the whole line, calling py_callback(x, y) per cell.

    Returns True if the callback never returned a falsy value (the C side
    stops early when the callback returns 0).
    """
    LINE_CBK_FUNC=CFUNCTYPE(c_uint,c_int,c_int)
    def int_callback(x,y) :
        # Convert the Python truthiness result into the 0/1 the C API expects.
        if py_callback(x,y) :
            return 1
        return 0
    c_callback=LINE_CBK_FUNC(int_callback)
    return _lib.TCOD_line(xo,yo,xd,yd,c_callback) == 1
############################
# image module
############################
_lib.TCOD_image_is_pixel_transparent.restype = c_uint
def image_new(width, height):
    """Create a blank image of width x height pixels; returns an image handle."""
    return _lib.TCOD_image_new(width, height)
def image_clear(image,col) :
    _lib.TCOD_image_clear(image,col)
def image_invert(image) :
    _lib.TCOD_image_invert(image)
def image_hflip(image) :
    _lib.TCOD_image_hflip(image)
def image_vflip(image) :
    _lib.TCOD_image_vflip(image)
def image_scale(image, neww, newh) :
    _lib.TCOD_image_scale(image,c_int(neww),c_int(newh))
def image_set_key_color(image,col) :
    """Mark col as the transparent key color for blits of this image."""
    _lib.TCOD_image_set_key_color(image,col)
def image_get_alpha(image,x,y) :
    return _lib.TCOD_image_get_alpha(image,c_int(x),c_int(y))
def image_is_pixel_transparent(image,x,y) :
    return _lib.TCOD_image_is_pixel_transparent(image,c_int(x),c_int(y)) == 1
def image_load(filename):
    """Load an image file (bmp/png) and return its handle."""
    return _lib.TCOD_image_load(filename)
def image_from_console(console):
    """Snapshot a console's current contents into a new image."""
    return _lib.TCOD_image_from_console(console)
def image_refresh_console(image, console):
    _lib.TCOD_image_refresh_console(image, console)
def image_get_size(image):
    """Return (width, height) of the image in pixels."""
    w=c_int()
    h=c_int()
    _lib.TCOD_image_get_size(image, byref(w), byref(h))
    return w.value, h.value
def image_get_pixel(image, x, y):
    return int_to_col(_lib.TCOD_image_get_pixel_wrapper(image, x, y))
def image_get_mipmap_pixel(image, x0, y0, x1, y1):
    # Averages the pixel rectangle (x0,y0)-(x1,y1); coordinates are floats.
    return int_to_col(_lib.TCOD_image_get_mipmap_pixel_wrapper(image, c_float(x0), c_float(y0),
                                                               c_float(x1), c_float(y1)))
def image_put_pixel(image, x, y, col):
    _lib.TCOD_image_put_pixel_wrapper(image, x, y, col_to_int(col))
def image_blit(image, console, x, y, bkgnd_flag, scalex, scaley, angle):
    """Blit with scaling and rotation; (x, y) is the image center on console."""
    _lib.TCOD_image_blit(image, console, c_float(x), c_float(y), bkgnd_flag,
                         c_float(scalex), c_float(scaley), c_float(angle))
def image_blit_rect(image, console, x, y, w, h, bkgnd_flag):
    _lib.TCOD_image_blit_rect(image, console, x, y, w, h, bkgnd_flag)
def image_blit_2x(image, console, dx, dy, sx=0, sy=0, w=-1, h=-1):
    # Sub-cell blit: each console cell renders 2x2 image pixels; w/h = -1
    # means "to the image edge".
    _lib.TCOD_image_blit_2x(image, console, dx,dy,sx,sy,w,h)
def image_save(image, filename):
    _lib.TCOD_image_save(image, filename)
def image_delete(image):
    """Free the C-side image; the handle must not be used afterwards."""
    _lib.TCOD_image_delete(image)
############################
# mouse module
############################
# Mirror of the C TCOD_mouse_t struct; field order and widths must match the
# C layout exactly.
class Mouse(Structure):
    _fields_=[('x', c_int, 32),
              ('y', c_int, 32),
              ('dx', c_int, 32),
              ('dy', c_int, 32),
              ('cx', c_int, 32),
              ('cy', c_int, 32),
              ('dcx', c_int, 32),
              ('dcy', c_int, 32),
              ('lbutton', c_uint, 8),
              ('rbutton', c_uint, 8),
              ('mbutton', c_uint, 8),
              ('lbutton_pressed', c_uint, 8),
              ('rbutton_pressed', c_uint, 8),
              ('mbutton_pressed', c_uint, 8),
              ('wheel_up', c_uint, 8),
              ('wheel_down', c_uint, 8),
              ]
_lib.TCOD_mouse_is_cursor_visible.restype = c_uint
def mouse_show_cursor(visible):
    _lib.TCOD_mouse_show_cursor(c_int(visible))
def mouse_is_cursor_visible():
    return _lib.TCOD_mouse_is_cursor_visible() == 1
def mouse_move(x, y):
    _lib.TCOD_mouse_move(x, y)
def mouse_get_status():
    """Return a filled Mouse structure with the current mouse state."""
    m = Mouse()
    _lib.TCOD_mouse_get_status_wrapper(byref(m))
    return m
############################
# parser module
############################
_lib.TCOD_struct_get_name.restype = c_char_p
_lib.TCOD_struct_is_mandatory.restype = c_uint
_lib.TCOD_parser_get_bool_property.restype = c_uint
_lib.TCOD_parser_get_float_property.restype = c_float
_lib.TCOD_parser_get_string_property.restype = c_char_p
# Mirror of the C TCOD_dice_t struct (e.g. "3d6x2+1").
class Dice(Structure):
    _fields_=[('nb_dices', c_int),
              ('nb_faces', c_int),
              ('multiplier', c_float),
              ('addsub', c_float),
              ]
# Union used by the parser listener callback: which member is valid depends
# on the TYPE_* code passed alongside it.
class _CValue(Union):
    _fields_=[('c',c_uint),
              ('i',c_int),
              ('f',c_float),
              ('s',c_char_p),
              ('col',Color),
              ('dice',Dice),
              ('custom',c_void_p),
              ]
# C callback signatures for the parser listener.
_CFUNC_NEW_STRUCT = CFUNCTYPE(c_uint, c_void_p, c_char_p)
_CFUNC_NEW_FLAG = CFUNCTYPE(c_uint, c_char_p)
_CFUNC_NEW_PROPERTY = CFUNCTYPE(c_uint, c_char_p, c_int, _CValue)
# Mirror of the C TCOD_parser_listener_t struct.
class _CParserListener(Structure):
    _fields_=[('new_struct', _CFUNC_NEW_STRUCT),
              ('new_flag',_CFUNC_NEW_FLAG),
              ('new_property',_CFUNC_NEW_PROPERTY),
              ('end_struct',_CFUNC_NEW_STRUCT),
              ('error',_CFUNC_NEW_FLAG),
              ]
# property types
TYPE_NONE = 0
TYPE_BOOL = 1
TYPE_CHAR = 2
TYPE_INT = 3
TYPE_FLOAT = 4
TYPE_STRING = 5
TYPE_COLOR = 6
TYPE_DICE = 7
TYPE_VALUELIST00 = 8
TYPE_VALUELIST01 = 9
TYPE_VALUELIST02 = 10
TYPE_VALUELIST03 = 11
TYPE_VALUELIST04 = 12
TYPE_VALUELIST05 = 13
TYPE_VALUELIST06 = 14
TYPE_VALUELIST07 = 15
TYPE_VALUELIST08 = 16
TYPE_VALUELIST09 = 17
TYPE_VALUELIST10 = 18
TYPE_VALUELIST11 = 19
TYPE_VALUELIST12 = 20
TYPE_VALUELIST13 = 21
TYPE_VALUELIST14 = 22
TYPE_VALUELIST15 = 23
# TYPE_LIST is OR-ed with an element type to declare list properties.
TYPE_LIST = 1024
def parser_new():
    """Create a new config-file parser; returns a parser handle."""
    return _lib.TCOD_parser_new()
def parser_new_struct(parser, name):
    """Declare a structure type named name in the parser's grammar."""
    return _lib.TCOD_parser_new_struct(parser, name)
def struct_add_flag(struct, name):
    _lib.TCOD_struct_add_flag(struct, name)
def struct_add_property(struct, name, typ, mandatory):
    _lib.TCOD_struct_add_property(struct, name, typ, c_int(mandatory))
def struct_add_value_list(struct, name, value_list, mandatory):
    """Declare a property restricted to the strings in value_list."""
    # Build a NULL-terminated C array of char*; the trailing 0 becomes NULL.
    CARRAY = c_char_p * (len(value_list) + 1)
    cvalue_list = CARRAY()
    for i in range(len(value_list)):
        cvalue_list[i] = cast(value_list[i], c_char_p)
    cvalue_list[len(value_list)] = 0
    _lib.TCOD_struct_add_value_list(struct, name, cvalue_list, c_int(mandatory))
def struct_add_list_property(struct, name, typ, mandatory):
    _lib.TCOD_struct_add_list_property(struct, name, typ, c_int(mandatory))
def struct_add_structure(struct, sub_struct):
    """Allow sub_struct to appear nested inside struct."""
    _lib.TCOD_struct_add_structure(struct, sub_struct)
def struct_get_name(struct):
    return _lib.TCOD_struct_get_name(struct)
def struct_is_mandatory(struct, name):
    return _lib.TCOD_struct_is_mandatory(struct, name) == 1
def struct_get_type(struct, name):
    return _lib.TCOD_struct_get_type(struct, name)
def parser_run(parser, filename, listener=0):
    """Parse filename; with a listener object, forward parser events to it.

    The listener must provide new_struct, new_flag, new_property, end_struct
    and error methods. Without a listener the parsed values are kept in the
    parser and read back with the parser_get_*_property functions.
    """
    if listener != 0:
        clistener=_CParserListener()
        # Unpack the _CValue union into the proper Python value based on the
        # property's declared type before handing it to the listener.
        def value_converter(name, typ, value):
            if typ == TYPE_BOOL:
                return listener.new_property(name, typ, value.c == 1)
            elif typ == TYPE_CHAR:
                return listener.new_property(name, typ, '%c' % (value.c & 0xFF))
            elif typ == TYPE_INT:
                return listener.new_property(name, typ, value.i)
            elif typ == TYPE_FLOAT:
                return listener.new_property(name, typ, value.f)
            elif typ == TYPE_STRING or \
                 TYPE_VALUELIST15 >= typ >= TYPE_VALUELIST00:
                return listener.new_property(name, typ, value.s)
            elif typ == TYPE_COLOR:
                return listener.new_property(name, typ, value.col)
            elif typ == TYPE_DICE:
                return listener.new_property(name, typ, value.dice)
            return True
        clistener.new_struct = _CFUNC_NEW_STRUCT(listener.new_struct)
        clistener.new_flag = _CFUNC_NEW_FLAG(listener.new_flag)
        clistener.new_property = _CFUNC_NEW_PROPERTY(value_converter)
        clistener.end_struct = _CFUNC_NEW_STRUCT(listener.end_struct)
        clistener.error = _CFUNC_NEW_FLAG(listener.error)
        _lib.TCOD_parser_run(parser, filename, byref(clistener))
    else:
        _lib.TCOD_parser_run(parser, filename, 0)
def parser_delete(parser):
    _lib.TCOD_parser_delete(parser)
# Default-listener accessors: read values stored during parser_run.
def parser_get_bool_property(parser, name):
    return _lib.TCOD_parser_get_bool_property(parser, name) == 1
def parser_get_int_property(parser, name):
    return _lib.TCOD_parser_get_int_property(parser, name)
def parser_get_char_property(parser, name):
    return '%c' % _lib.TCOD_parser_get_char_property(parser, name)
def parser_get_float_property(parser, name):
    return _lib.TCOD_parser_get_float_property(parser, name)
def parser_get_string_property(parser, name):
    return _lib.TCOD_parser_get_string_property(parser, name)
def parser_get_color_property(parser, name):
    iret=_lib.TCOD_parser_get_color_property_wrapper(parser, name)
    return int_to_col(iret)
def parser_get_dice_property(parser, name):
    d = Dice()
    _lib.TCOD_parser_get_dice_property_py(parser, name, byref(d))
    return d
def parser_get_list_property(parser, name):
    """Return the elements of a TYPE_LIST property as a Python list."""
    clist = _lib.TCOD_parser_get_list_property(parser, name)
    res = list()
    for i in range(_lib.TCOD_list_size(clist)):
        elt = _lib.TCOD_list_get(clist, i)
        res.append(elt)
    return res
############################
# random module
############################
_lib.TCOD_random_get_float.restype = c_float
_lib.TCOD_random_get_gaussian_float.restype = c_float
# RNG algorithms: Mersenne Twister or Complementary Multiply With Carry.
RNG_MT = 0
RNG_CMWC = 1
def random_get_instance():
    """Return the library's default (shared) RNG instance."""
    return _lib.TCOD_random_get_instance()
def random_new(algo=RNG_CMWC):
    return _lib.TCOD_random_new(algo)
def random_new_from_seed(seed, algo=RNG_CMWC):
    return _lib.TCOD_random_new_from_seed(algo,c_uint(seed))
def random_get_int(rnd, mi, ma):
    """Uniform integer in [mi, ma] from RNG rnd (0 = default RNG)."""
    return _lib.TCOD_random_get_int(rnd, mi, ma)
def random_get_float(rnd, mi, ma):
    return _lib.TCOD_random_get_float(rnd, c_float(mi), c_float(ma))
def random_get_gaussian_float(rnd, mi, ma):
    return _lib.TCOD_random_get_gaussian_float(rnd, c_float(mi), c_float(ma))
def random_get_gaussian_int(rnd, mi, ma):
    return _lib.TCOD_random_get_gaussian_int(rnd, mi, ma)
def random_save(rnd):
    """Snapshot the RNG state; restore it later with random_restore."""
    return _lib.TCOD_random_save(rnd)
def random_restore(rnd, backup):
    _lib.TCOD_random_restore(rnd, backup)
def random_delete(rnd):
    _lib.TCOD_random_delete(rnd)
############################
# noise module
############################
_lib.TCOD_noise_perlin.restype = c_float
_lib.TCOD_noise_simplex.restype = c_float
_lib.TCOD_noise_wavelet.restype = c_float
_lib.TCOD_noise_fbm_perlin.restype = c_float
_lib.TCOD_noise_fbm_simplex.restype = c_float
_lib.TCOD_noise_fbm_wavelet.restype = c_float
_lib.TCOD_noise_turbulence_perlin.restype = c_float
_lib.TCOD_noise_turbulence_simplex.restype = c_float
_lib.TCOD_noise_turbulence_wavelet.restype = c_float
NOISE_DEFAULT_HURST = 0.5
NOISE_DEFAULT_LACUNARITY = 2.0
def noise_new(dim, h=NOISE_DEFAULT_HURST, l=NOISE_DEFAULT_LACUNARITY, rnd=0):
    """Create a dim-dimensional noise generator with fractal parameters h, l."""
    return _lib.TCOD_noise_new(dim, c_float(h), c_float(l), rnd)
def _noise_int(n, f, func):
ct = c_float * len(f)
cf = ct()
i = 0
for value in f:
cf[i] = c_float(value)
i += 1
return func(n, cf)
# Flat (single-octave) noise lookups; f is a coordinate sequence of the
# generator's dimension, result is a float in [-1.0, 1.0].
def noise_perlin(n, f):
    return _noise_int(n, f, _lib.TCOD_noise_perlin)
def noise_simplex(n, f):
    return _noise_int(n, f, _lib.TCOD_noise_simplex)
def noise_wavelet(n, f):
    return _noise_int(n, f, _lib.TCOD_noise_wavelet)
def _noise_int2(n, f, oc, func):
ct = c_float * len(f)
cf = ct()
i = 0
for value in f:
cf[i] = c_float(value)
i += 1
return func(n, cf, c_float(oc))
# Fractal noise: oc is the number of octaves accumulated.
def noise_fbm_perlin(n, f, oc):
    return _noise_int2(n, f, oc, _lib.TCOD_noise_fbm_perlin)
def noise_fbm_simplex(n, f, oc):
    return _noise_int2(n, f, oc, _lib.TCOD_noise_fbm_simplex)
def noise_fbm_wavelet(n, f, oc):
    return _noise_int2(n, f, oc, _lib.TCOD_noise_fbm_wavelet)
def noise_turbulence_perlin(n, f, oc):
    return _noise_int2(n, f, oc, _lib.TCOD_noise_turbulence_perlin)
def noise_turbulence_simplex(n, f, oc):
    return _noise_int2(n, f, oc, _lib.TCOD_noise_turbulence_simplex)
def noise_turbulence_wavelet(n, f, oc):
    return _noise_int2(n, f, oc, _lib.TCOD_noise_turbulence_wavelet)
def noise_delete(n):
    """Free the C-side noise generator."""
    _lib.TCOD_noise_delete(n)
############################
# fov module
############################
# Field-of-view algorithm identifiers.
FOV_BASIC = 0
FOV_DIAMOND = 1
FOV_SHADOW = 2
FOV_PERMISSIVE_0 = 3
FOV_PERMISSIVE_1 = 4
FOV_PERMISSIVE_2 = 5
FOV_PERMISSIVE_3 = 6
FOV_PERMISSIVE_4 = 7
FOV_PERMISSIVE_5 = 8
FOV_PERMISSIVE_6 = 9
FOV_PERMISSIVE_7 = 10
FOV_PERMISSIVE_8 = 11
FOV_RESTRICTIVE = 12
NB_FOV_ALGORITHMS = 13
def FOV_PERMISSIVE(p) :
    """Permissive algorithm with permissiveness level p (0..8)."""
    return FOV_PERMISSIVE_0+p
def map_new(w, h):
    """Create a w x h FOV map; cells start opaque and unwalkable."""
    return _lib.TCOD_map_new(w, h)
def map_copy(source, dest):
    return _lib.TCOD_map_copy(source, dest)
def map_set_properties(m, x, y, isTrans, isWalk):
    _lib.TCOD_map_set_properties(m, x, y, c_int(isTrans), c_int(isWalk))
def map_clear(m):
    _lib.TCOD_map_clear(m)
def map_compute_fov(m, x, y, radius=0, light_walls=True, algo=FOV_RESTRICTIVE ):
    """Compute visibility from (x, y); radius 0 means unlimited range."""
    _lib.TCOD_map_compute_fov(m, x, y, c_int(radius), c_uint(light_walls), c_int(algo))
def map_is_in_fov(m, x, y):
    return _lib.TCOD_map_is_in_fov(m, x, y) == 1
def map_is_transparent(m, x, y):
    return _lib.TCOD_map_is_transparent(m, x, y) == 1
def map_is_walkable(m, x, y):
    return _lib.TCOD_map_is_walkable(m, x, y) == 1
def map_delete(m):
    return _lib.TCOD_map_delete(m)
############################
# pathfinding module
############################
_lib.TCOD_path_compute.restype = c_uint
_lib.TCOD_path_is_empty.restype = c_uint
_lib.TCOD_path_walk.restype = c_uint
def path_new_using_map(m, dcost=1.41):
    """Create an A* pathfinder over FOV map m; dcost is the diagonal step cost."""
    return _lib.TCOD_path_new_using_map(c_void_p(m), c_float(dcost))
# Globals keep the ctypes callback alive so the C side never calls freed memory.
PATH_CBK_FUNC = None
cbk_func = None
def path_new_using_function(w, h, func, userdata=0, dcost=1.41):
    """Create an A* pathfinder driven by func(x_from, y_from, x_to, y_to, userdata)."""
    global PATH_CBK_FUNC
    global cbk_func
    PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
    cbk_func = PATH_CBK_FUNC(func)
    return _lib.TCOD_path_new_using_function(w, h, cbk_func,
                                             py_object(userdata), c_float(dcost))
def path_compute(p, ox, oy, dx, dy):
    """Compute a path from (ox, oy) to (dx, dy); True on success."""
    return _lib.TCOD_path_compute(p, ox, oy, dx, dy) == 1
def path_get_origin(p):
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get_origin(p, byref(x), byref(y))
    return x.value, y.value
def path_get_destination(p):
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get_destination(p, byref(x), byref(y))
    return x.value, y.value
def path_size(p):
    return _lib.TCOD_path_size(p)
def path_get(p, idx):
    """Return the (x, y) of step idx on the computed path."""
    x = c_int()
    y = c_int()
    _lib.TCOD_path_get(p, idx, byref(x), byref(y))
    return x.value, y.value
def path_is_empty(p):
    return _lib.TCOD_path_is_empty(p) == 1
def path_walk(p, recompute):
    """Pop the next step; (x, y), or (None, None) when the path is exhausted."""
    x = c_int()
    y = c_int()
    if _lib.TCOD_path_walk(p, byref(x), byref(y), c_int(recompute)):
        return x.value, y.value
    return None,None
def path_delete(p):
    _lib.TCOD_path_delete(p)
# Dijkstra pathfinding: compute once from an origin, then extract paths to
# any number of destinations.
_lib.TCOD_dijkstra_path_set.restype = c_uint
_lib.TCOD_dijkstra_is_empty.restype = c_uint
_lib.TCOD_dijkstra_size.restype = c_int
_lib.TCOD_dijkstra_path_walk.restype = c_uint
_lib.TCOD_dijkstra_get_distance.restype = c_float
def dijkstra_new(m, dcost=1.41):
    return _lib.TCOD_dijkstra_new(c_void_p(m), c_float(dcost))
# NOTE(review): re-binding PATH_CBK_FUNC/cbk_func here means a dijkstra
# callback replaces a previously registered A* callback global.
PATH_CBK_FUNC = None
cbk_func = None
def dijkstra_new_using_function(w, h, func, userdata=0, dcost=1.41):
    global PATH_CBK_FUNC
    global cbk_func
    PATH_CBK_FUNC = CFUNCTYPE(c_float, c_int, c_int, c_int, c_int, py_object)
    cbk_func = PATH_CBK_FUNC(func)
    # NOTE(review): calls TCOD_path_dijkstra_using_function, which does not
    # match the TCOD_dijkstra_* naming used elsewhere -- verify this symbol
    # exists in the linked libtcod before relying on it.
    return _lib.TCOD_path_dijkstra_using_function(w, h, cbk_func,
                                                  py_object(userdata), c_float(dcost))
def dijkstra_compute(p, ox, oy):
    _lib.TCOD_dijkstra_compute(p, c_int(ox), c_int(oy))
def dijkstra_path_set(p, x, y):
    """Select (x, y) as the destination to walk towards."""
    return _lib.TCOD_dijkstra_path_set(p, c_int(x), c_int(y))
def dijkstra_get_distance(p, x, y):
    return _lib.TCOD_dijkstra_get_distance(p, c_int(x), c_int(y))
def dijkstra_size(p):
    return _lib.TCOD_dijkstra_size(p)
def dijkstra_get(p, idx):
    x = c_int()
    y = c_int()
    _lib.TCOD_dijkstra_get(p, c_int(idx), byref(x), byref(y))
    return x.value, y.value
def dijkstra_is_empty(p):
    return _lib.TCOD_dijkstra_is_empty(p) == 1
def dijkstra_path_walk(p):
    """Pop the next step; (x, y), or (None, None) when the path is exhausted."""
    x = c_int()
    y = c_int()
    if _lib.TCOD_dijkstra_path_walk(p, byref(x), byref(y)):
        return x.value, y.value
    return None,None
def dijkstra_delete(p):
    _lib.TCOD_dijkstra_delete(p)
############################
# bsp module
############################
# Mirror of the C TCOD_bsp_t struct (binary space partition node).
class _CBsp(Structure):
    _fields_ = [('next', c_int, 32),
                ('father', c_int, 32),
                ('son', c_int, 32),
                ('x', c_int, 32),
                ('y', c_int, 32),
                ('w', c_int, 32),
                ('h', c_int, 32),
                ('position', c_int, 32),
                ('level', c_uint, 32),
                ('horizontal', c_uint, 32),
                ]
_lib.TCOD_bsp_new_with_size.restype = POINTER(_CBsp)
_lib.TCOD_bsp_left.restype = POINTER(_CBsp)
_lib.TCOD_bsp_right.restype = POINTER(_CBsp)
_lib.TCOD_bsp_father.restype = POINTER(_CBsp)
_lib.TCOD_bsp_is_leaf.restype = c_uint
_lib.TCOD_bsp_contains.restype = c_uint
_lib.TCOD_bsp_find_node.restype = POINTER(_CBsp)
# python class encapsulating the _CBsp pointer
class Bsp(object):
    """Thin wrapper exposing the fields of a C BSP node as properties.

    Reads and writes go straight through self.p to the C-owned struct, so
    mutations are visible to the C library.
    """
    def __init__(self, cnode):
        pcbsp = cast(cnode, POINTER(_CBsp))
        self.p = pcbsp
    def getx(self):
        return self.p.contents.x
    def setx(self, value):
        self.p.contents.x = value
    x = property(getx, setx)
    def gety(self):
        return self.p.contents.y
    def sety(self, value):
        self.p.contents.y = value
    y = property(gety, sety)
    def getw(self):
        return self.p.contents.w
    def setw(self, value):
        self.p.contents.w = value
    w = property(getw, setw)
    def geth(self):
        return self.p.contents.h
    def seth(self, value):
        self.p.contents.h = value
    h = property(geth, seth)
    def getpos(self):
        return self.p.contents.position
    def setpos(self, value):
        self.p.contents.position = value
    position = property(getpos, setpos)
    def gethor(self):
        return self.p.contents.horizontal == 1
    def sethor(self,value):
        self.p.contents.horizontal = c_int(value)
    horizontal = property(gethor, sethor)
    def getlev(self):
        return self.p.contents.level
    def setlev(self,value):
        self.p.contents.level = value
    level = property(getlev, setlev)
def bsp_new_with_size(x, y, w, h):
    """Create a root BSP node covering the rectangle (x, y, w, h)."""
    return Bsp(_lib.TCOD_bsp_new_with_size(x, y, w, h))
def bsp_split_once(node, horizontal, position):
    _lib.TCOD_bsp_split_once(node.p, c_int(horizontal), position)
def bsp_split_recursive(node, randomizer, nb, minHSize, minVSize, maxHRatio,
                        maxVRatio):
    """Recursively split node nb times, respecting minimum sizes/ratios."""
    _lib.TCOD_bsp_split_recursive(node.p, randomizer, nb, minHSize, minVSize,
                                  c_float(maxHRatio), c_float(maxVRatio))
def bsp_resize(node, x, y, w, h):
    _lib.TCOD_bsp_resize(node.p, x, y, w, h)
def bsp_left(node):
    return Bsp(_lib.TCOD_bsp_left(node.p))
def bsp_right(node):
    return Bsp(_lib.TCOD_bsp_right(node.p))
def bsp_father(node):
    return Bsp(_lib.TCOD_bsp_father(node.p))
def bsp_is_leaf(node):
    return _lib.TCOD_bsp_is_leaf(node.p) == 1
def bsp_contains(node, cx, cy):
    return _lib.TCOD_bsp_contains(node.p, cx, cy) == 1
def bsp_find_node(node, cx, cy):
    return Bsp(_lib.TCOD_bsp_find_node(node.p, cx, cy))
def _bsp_traverse(node, callback, userData, func):
    """Shared driver for the five traversal orders below."""
    BSP_CBK_FUNC = CFUNCTYPE(c_int, c_void_p, c_void_p)
    # convert the c node into a python node
    #before passing it to the actual callback
    def node_converter(cnode, data):
        node = Bsp(cnode)
        return callback(node, data)
    cbk_func = BSP_CBK_FUNC(node_converter)
    func(node.p, cbk_func, userData)
def bsp_traverse_pre_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_pre_order)
def bsp_traverse_in_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_in_order)
def bsp_traverse_post_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_post_order)
def bsp_traverse_level_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData, _lib.TCOD_bsp_traverse_level_order)
def bsp_traverse_inverted_level_order(node, callback, userData=0):
    _bsp_traverse(node, callback, userData,
                  _lib.TCOD_bsp_traverse_inverted_level_order)
def bsp_remove_sons(node):
    """Delete the node's children (the node itself survives)."""
    _lib.TCOD_bsp_remove_sons(node.p)
def bsp_delete(node):
    """Delete the node and its whole subtree."""
    _lib.TCOD_bsp_delete(node.p)
############################
# heightmap module
############################
# Mirror of the C TCOD_heightmap_t struct.
class _CHeightMap(Structure):
    _fields_=[('w', c_int),
              ('h', c_int),
              ('values', POINTER(c_float)),
              ]
_lib.TCOD_heightmap_new.restype = POINTER(_CHeightMap)
_lib.TCOD_heightmap_get_value.restype = c_float
_lib.TCOD_heightmap_has_land_on_border.restype = c_uint
class HeightMap(object):
    """Thin wrapper exposing a C heightmap's w/h fields as properties."""
    def __init__(self, chm):
        pchm = cast(chm, POINTER(_CHeightMap))
        self.p = pchm
    def getw(self):
        return self.p.contents.w
    def setw(self, value):
        self.p.contents.w = value
    w = property(getw, setw)
    def geth(self):
        return self.p.contents.h
    def seth(self, value):
        self.p.contents.h = value
    h = property(geth, seth)
def heightmap_new(w, h):
    """Create a w x h heightmap of floats (initialized C-side)."""
    phm = _lib.TCOD_heightmap_new(w, h)
    return HeightMap(phm)
def heightmap_set_value(hm, x, y, value):
    _lib.TCOD_heightmap_set_value(hm.p, x, y, c_float(value))
def heightmap_add(hm, value):
    _lib.TCOD_heightmap_add(hm.p, c_float(value))
def heightmap_scale(hm, value):
    _lib.TCOD_heightmap_scale(hm.p, c_float(value))
def heightmap_clear(hm):
    _lib.TCOD_heightmap_clear(hm.p)
def heightmap_clamp(hm, mi, ma):
    _lib.TCOD_heightmap_clamp(hm.p, c_float(mi),c_float(ma))
def heightmap_copy(hm1, hm2):
    # Copies hm1 into hm2.
    _lib.TCOD_heightmap_copy(hm1.p, hm2.p)
def heightmap_normalize(hm, mi=0.0, ma=1.0):
    """Linearly rescale all values into [mi, ma]."""
    _lib.TCOD_heightmap_normalize(hm.p, c_float(mi), c_float(ma))
def heightmap_lerp_hm(hm1, hm2, hm3, coef):
    # hm3 = hm1 * (1 - coef) + hm2 * coef
    _lib.TCOD_heightmap_lerp_hm(hm1.p, hm2.p, hm3.p, c_float(coef))
def heightmap_add_hm(hm1, hm2, hm3):
    _lib.TCOD_heightmap_add_hm(hm1.p, hm2.p, hm3.p)
def heightmap_multiply_hm(hm1, hm2, hm3):
    _lib.TCOD_heightmap_multiply_hm(hm1.p, hm2.p, hm3.p)
def heightmap_add_hill(hm, x, y, radius, height):
    """Raise a smooth hill centered at (x, y)."""
    _lib.TCOD_heightmap_add_hill(hm.p, c_float( x), c_float( y),
                                 c_float( radius), c_float( height))
def heightmap_dig_hill(hm, x, y, radius, height):
    _lib.TCOD_heightmap_dig_hill(hm.p, c_float( x), c_float( y),
                                 c_float( radius), c_float( height))
def heightmap_rain_erosion(hm, nbDrops, erosionCoef, sedimentationCoef, rnd=0):
    """Simulate nbDrops raindrops eroding and sedimenting the terrain."""
    _lib.TCOD_heightmap_rain_erosion(hm.p, nbDrops, c_float( erosionCoef),
                                     c_float( sedimentationCoef), rnd)
def heightmap_kernel_transform(hm, kernelsize, dx, dy, weight, minLevel,
                               maxLevel):
    """Apply a convolution-like kernel of kernelsize offsets/weights to all
    cells whose value lies in [minLevel, maxLevel]."""
    FARRAY = c_float * kernelsize
    IARRAY = c_int * kernelsize
    cdx = IARRAY()
    cdy = IARRAY()
    cweight = FARRAY()
    for i in range(kernelsize):
        cdx[i] = c_int(dx[i])
        cdy[i] = c_int(dy[i])
        cweight[i] = c_float(weight[i])
    _lib.TCOD_heightmap_kernel_transform(hm.p, kernelsize, cdx, cdy, cweight,
                                         c_float(minLevel), c_float(maxLevel))
def heightmap_add_voronoi(hm, nbPoints, nbCoef, coef, rnd=0):
    """Add a Voronoi diagram of nbPoints sites weighted by coef[0..nbCoef-1]."""
    FARRAY = c_float * nbCoef
    ccoef = FARRAY()
    for i in range(nbCoef):
        ccoef[i] = c_float(coef[i])
    _lib.TCOD_heightmap_add_voronoi(hm.p, nbPoints, nbCoef, ccoef, rnd)
def heightmap_add_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta, scale):
    """Add scaled fbm noise: value += fbm(x*mulx+addx, y*muly+addy)*scale+delta."""
    _lib.TCOD_heightmap_add_fbm(hm.p, noise, c_float(mulx), c_float(muly),
                                c_float(addx), c_float(addy),
                                c_float(octaves), c_float(delta),
                                c_float(scale))
def heightmap_scale_fbm(hm, noise, mulx, muly, addx, addy, octaves, delta,
                        scale):
    _lib.TCOD_heightmap_scale_fbm(hm.p, noise, c_float(mulx), c_float(muly),
                                  c_float(addx), c_float(addy),
                                  c_float(octaves), c_float(delta),
                                  c_float(scale))
def heightmap_dig_bezier(hm, px, py, startRadius, startDepth, endRadius,
                         endDepth):
    """Carve a path along the cubic Bezier defined by 4 control points."""
    IARRAY = c_int * 4
    cpx = IARRAY()
    cpy = IARRAY()
    for i in range(4):
        cpx[i] = c_int(px[i])
        cpy[i] = c_int(py[i])
    _lib.TCOD_heightmap_dig_bezier(hm.p, cpx, cpy, c_float(startRadius),
                                   c_float(startDepth), c_float(endRadius),
                                   c_float(endDepth))
def heightmap_get_value(hm, x, y):
    return _lib.TCOD_heightmap_get_value(hm.p, x, y)
def heightmap_get_interpolated_value(hm, x, y):
    # NOTE(review): "interplated" matches the symbol name exported by this
    # libtcod version -- verify against the linked C library before renaming.
    return _lib.TCOD_heightmap_get_interplated_value(hm.p, c_float(x),
                                                     c_float(y))
def heightmap_get_slope(hm, x, y):
    return _lib.TCOD_heightmap_get_slope(hm.p, x, y)
def heightmap_get_normal(hm, x, y, waterLevel):
    """Return the (nx, ny, nz) surface normal at (x, y)."""
    FARRAY = c_float * 3
    cn = FARRAY()
    _lib.TCOD_heightmap_get_normal(hm.p, c_float(x), c_float(y), cn,
                                   c_float(waterLevel))
    return cn[0], cn[1], cn[2]
def heightmap_count_cells(hm, mi, ma):
    return _lib.TCOD_heightmap_count_cells(hm.p, c_float(mi), c_float(ma))
def heightmap_has_land_on_border(hm, waterlevel):
    return _lib.TCOD_heightmap_has_land_on_border(hm.p,
                                                  c_float(waterlevel)) == 1
def heightmap_get_minmax(hm):
    """Return (min, max) of all heightmap values."""
    mi = c_float()
    ma = c_float()
    _lib.TCOD_heightmap_get_minmax(hm.p, byref(mi), byref(ma))
    return mi.value, ma.value
def heightmap_delete(hm):
    _lib.TCOD_heightmap_delete(hm.p)
############################
# name generator module
############################
_lib.TCOD_namegen_get_nb_sets_wrapper.restype = c_int
def namegen_parse(filename,random=0) :
    """Load name-generation rule sets from a .cfg file (random 0 = default RNG)."""
    _lib.TCOD_namegen_parse(filename,random)
def namegen_generate(name, allocate=0) :
    """Generate a name from the rule set called name."""
    return _lib.TCOD_namegen_generate(name, c_int(allocate))
def namegen_generate_custom(name, rule, allocate=0) :
    """Generate a name from set *name* using the explicit generation *rule*.

    Fix: this previously called TCOD_namegen_generate, which takes no rule
    argument -- the rule string was silently misused as the allocate flag.
    Route to TCOD_namegen_generate_custom as the function name promises.
    """
    return _lib.TCOD_namegen_generate_custom(name, rule, c_int(allocate))
def namegen_get_sets():
    """Return the names of all loaded rule sets as a Python list."""
    nb=_lib.TCOD_namegen_get_nb_sets_wrapper()
    SARRAY = c_char_p * nb;
    setsa = SARRAY()
    _lib.TCOD_namegen_get_sets_wrapper(setsa)
    ret=list()
    for i in range(nb):
        ret.append(setsa[i])
    return ret
def namegen_destroy() :
    """Free all loaded rule sets."""
    _lib.TCOD_namegen_destroy()
| Python |
from random import choice, random, shuffle
from math import log
import libtcodpy as T
from settings import *
from util import *
from item import Item
# do I need this?
import corpse
import ui
class Mob(object):
    """Anything that occupies a map tile: the player or a monster.

    Class attributes are per-species defaults that subclasses override;
    instances shadow them where needed.
    """
    # Map position; None until put() places the mob on a map.
    x, y = None, None
    glyph = UNKNOWN_GLYPH
    map = None
    # Ghost-like mobs may move through unwalkable tiles.
    enters_walls = False
    # (n, sides, bonus) sanity damage on hit, or None for no sanity attack.
    sanity_dice = None
    # Imaginary mobs (real=False) vanish when attacked unless the player
    # has the relevant effect; see Monster.damage().
    real = True
    # -4 = rests every other turn
    # -1 = rests every 5th turn
    # +1 = extra move every 5th turn
    # +4 = extra move every other turn
    speed = 0
    # N = regens N/10 health points every turn
    regen = 1
    # damage reduction
    armor = 0
    def __init__(self):
        # Fractional regeneration accumulator, in tenths of a hit point.
        self.to_regen = 0
    @property
    def tile(self):
        """The map tile this mob currently stands on."""
        return self.map.tiles[self.x][self.y]
    def put(self, m, x, y):
        """Place the mob on map ``m`` at (x, y); the tile must be free."""
        tile = m.tiles[x][y]
        self.map = m
        self.x, self.y = x, y
        assert self.tile.mob is None
        self.tile.mob = self
        m.mobs.append(self)
    def remove(self):
        """Take the mob off the map entirely (death, disappearance)."""
        self.tile.mob = None
        self.map.mobs.remove(self)
    def move(self, x, y):
        """Relocate to (x, y); the destination must be unoccupied."""
        self.tile.mob = None
        self.x, self.y = x, y
        assert self.tile.mob is None
        self.tile.mob = self
    def can_walk(self, dx, dy):
        """True if a single step by (dx, dy) lands on a passable free tile."""
        destx, desty = self.x+dx, self.y+dy
        if not in_map(destx, desty):
            return False
        tile = self.map.tiles[destx][desty]
        return (tile.walkable or self.enters_walls) and \
            not tile.mob
    def walk(self, dx, dy):
        """Step one tile in direction (dx, dy); caller validates the move."""
        self.move(self.x+dx, self.y+dy)
    def is_besides(self, mob):
        """True when ``mob`` is exactly one tile away (Chebyshev distance)."""
        return max(abs(self.x-mob.x),abs(self.y-mob.y)) == 1
    # Called every time a mob has an opportunity to act (depending on speed)
    def act(self):
        if self.hp < self.max_hp:
            self.to_regen += self.regen
            if self.to_regen > 10:
                # Integer division (Python 2): whole points are banked,
                # the remainder stays in the accumulator.
                self.hp = min(self.max_hp, self.to_regen/10+self.hp)
                self.to_regen %= 10
    # Called every turn
    def heartbeat(self):
        pass
class Player(Mob):
    """The player character: inventory, equipment, sanity and leveling."""
    glyph = '@', T.white
    regen = 4
    name = 'you'
    def __init__(self, wizard):
        """Create a fresh character; ``wizard`` grants debug equipment."""
        super(Player, self).__init__()
        self.level = 1
        self.sanity = MAX_SANITY
        # dict letter -> effect
        self.effects = {}
        # A conscious effort to make the game harder
        # self.max_hp = 28
        self.max_hp = roll(8,6)
        self.speed = 0
        # Again, trying to make the game harder
        # self.hp = self.max_hp
        # NOTE: Python 2 integer division -- starts at half (rounded down).
        self.hp = self.max_hp / 2
        import item
        # A conscious effort to make the game harder
        self.items = [item.Candle(), item.PotionSanity(), item.PotionHealing()]
        # self.items = [item.Torch(), item.PotionSanity(), item.PotionSanity()]
        self.items.append(random_by_level(1, Item.ALL)())
        if wizard:
            self.items = [item.EterniumSword(), item.Lamp()]
            self.items += [item.PotionSanity(), item.PotionHealing()]
            self.items += [item.PotionLight(), item.PotionLevel()]
            self.items += [item.PotionVictory()]
        # I think this should auto-equip, but it doesn't. Wassup?
        self.equipment = dict((slot, None) for slot in INVENTORY_SLOTS)
        self.fov_range = 3
        self.light_range = 0
        # Number of actions the player may still take this game turn.
        self.action_turns = 1
        self.armor = 0
        self.exp = 0
        # None while alive; otherwise a short death-cause string.
        self.death = None
        self.won = False
    @property
    def dice(self):
        """Attack dice (n, sides, bonus): weapon dice (or bare-handed
        1d3) with the character level added to the bonus."""
        weapon = self.equipment['w']
        if weapon:
            a, b, c = weapon.dice
        else:
            a, b, c = 1, 3, 0
        c += self.level-1
        return a, b, c
    def add_exp(self, mob):
        """Award experience for killing ``mob`` and level up if due."""
        # Exponential XP per kill; level derived from a log curve.
        self.exp += int(1.7 ** mob.level)
        new_level = min(int(log(self.exp/5+2, 2)), MAX_CLEVEL)
        while new_level > self.level:
            self.advance()
    def advance(self):
        """Gain one character level and its hit-point increase."""
        self.level += 1
        hp_inc = roll(2,6,self.level+3)
        self.max_hp += hp_inc
        self.hp += hp_inc
        ui.message('Congratulations! You advance to level %d.' % self.level,
                   T.yellow)
    def change_light_range(self, n):
        """Adjust light radius by ``n`` (and FOV with it), then redo FOV."""
        self.light_range += n
        self.fov_range += n
        self.map.recalc_fov()
    def has_equipped(self, item):
        """True if ``item`` occupies its equipment slot right now."""
        return item.slot and self.equipment[item.slot] == item
    def put(self, m, x, y):
        # Placing the player also registers them on the map and sets up FOV.
        super(Player, self).put(m, x, y)
        self.map.player = self
        self.map.recalc_fov()
    def move(self, x, y):
        """Move, refresh FOV, trigger the tile, and describe what lies here."""
        super(Player, self).move(x, y)
        self.map.recalc_fov()
        self.tile.on_enter()
        if self.tile.items:
            if len(self.tile.items) == 1:
                ui.message('You see here %s.' % self.tile.items[0].a)
            else:
                ui.message('Several items are lying here.')
        if self.tile.corpses:
            if len(self.tile.corpses) == 1:
                ui.message('There is %s here.' % self.tile.corpses[0].a)
            else:
                ui.message('There are some corpses here.')
        self.use_energy()
    def walk(self, dx, dy, panic=True):
        """Try to step by (dx, dy): may panic (fear effect), attack an
        occupant, bump a wall, or actually move."""
        destx, desty = self.x+dx, self.y+dy
        if not in_map(destx, desty):
            return False
        tile = self.map.tiles[destx][desty]
        if panic and 'f' in self.effects:
            # Fear: chance of a random step instead, scaling with the
            # number of adjacent monsters; recurse with panic=False so a
            # panicked step cannot re-panic.
            neighbors = self.map.neighbor_tiles(self.x, self.y)
            n_monsters = sum(1 if tile.mob else 0 for tile in neighbors)
            if roll(1, 12) <= min(6, n_monsters+1):
                ui.message('You panic!', T.yellow)
                dx, dy = choice(ALL_DIRS)
                self.walk(dx, dy, False)
                return
        if tile.mob:
            self.attack(tile.mob)
        elif not tile.walkable:
            # Bumping a wall costs no energy (no use_energy call).
            ui.message('You bump into a wall.')
            # NOTE(review): dead statement, kept verbatim.
            pass
        else:
            self.move(destx, desty)
    def use(self, item):
        """Apply a usable item, or toggle equipment on/off."""
        if item.slot is None:
            item.on_use(self)
            self.use_energy()
        elif self.has_equipped(item):
            self.unequip(item)
        else:
            self.equip(item)
    def unequip(self, item):
        """Remove ``item`` from its slot (costs one action)."""
        ui.message('You unequip the %s.' % item.descr)
        item.on_unequip(self)
        self.equipment[item.slot] = None
        self.use_energy()
    def equip(self, item):
        """Equip ``item``, swapping out whatever held its slot."""
        old_item = self.equipment[item.slot]
        if old_item:
            self.unequip(old_item)
        ui.message('You equip the %s.' % item.descr)
        item.on_equip(self)
        self.equipment[item.slot] = item
        self.use_energy()
    def attack(self, mon):
        """Strike ``mon``; a natural 20 doubles the damage."""
        dmg = roll(*self.dice)
        if roll(1, 20) < 20:
            ui.message('You hit the %s.' % mon.name)
        else:
            ui.message('You critically hit the %s!' % mon.name, T.yellow)
            dmg *= 2
        mon.damage(dmg)
        self.use_energy()
    def damage(self, dmg, mon):
        """Take ``dmg`` from ``mon`` after armor; may kill the player."""
        dmg -= self.armor
        if dmg < 0:
            ui.message('Your armor protects you.')
            return
        self.hp -= dmg
        if self.hp <= 0:
            if not self.death:
                ui.message('You die...', T.red)
                # everything has to look normal?
                mon.look_normal()
                self.death = 'killed by %s%s' % (
                    ('imaginary ' if not mon.real else ''),
                    mon.name)
    def pick_up(self, item):
        """Move ``item`` from the current tile into the inventory."""
        if len(self.items) == INVENTORY_SIZE:
            ui.message('You can\'t carry anymore items.', T.red)
            return
        assert item in self.tile.items
        self.tile.items.remove(item)
        self.items.append(item)
        ui.message('You pick up the %s.' % item.descr)
        self.use_energy()
    def lose(self, item):
        # This started as a copy of drop() without an explicit unequip()
        self.items.remove(item)
        ui.message('You lose the %s.' % item.descr)
        self.use_energy()
    def drop(self, item):
        """Put ``item`` on the current tile, unequipping it first."""
        if self.has_equipped(item):
            self.unequip(item)
        self.items.remove(item)
        self.tile.items.append(item)
        ui.message('You drop the %s.' % item.descr)
        self.use_energy()
    def act(self):
        # Regenerate (unless dead) and grant this turn's action point.
        if not self.death:
            super(Player, self).act()
        self.action_turns += 1
    def use_energy(self):
        # Spend one action point; the turn loop reads action_turns.
        self.action_turns -= 1
    def wait(self):
        """Pass the turn doing nothing."""
        self.use_energy()
    def extinguish(self, light):
        """Put out ``light`` and destroy it (burnt out or snuffed)."""
        ui.message('Your %s is extinguished!' % light.descr)
        if 'd' in self.effects:
            ui.message('You are likely to be eaten by a grue.')
        light.on_unequip(self)
        self.equipment['l'] = None
        self.items.remove(light)
    def heartbeat(self):
        """Per-turn upkeep: burn the light source and erode sanity."""
        super(Player, self).heartbeat()
        light = self.equipment['l']
        if light:
            light.turns_left -= 1
            if light.turns_left <= 0:
                self.extinguish(light)
            # some types of lights should backfire
            if roll(1, 20) == 1:
                # the original game rolled a d11 every turn
                self.decrease_sanity(roll(1, max(2, self.map.level-4)))
        else:
            # The chance of decreasing sanity should depend on light
            if roll(1, 4) == 1:
                self.decrease_sanity(roll(1, max(2, self.map.level-4)))
    def decrease_sanity(self, n):
        """Lose ``n`` sanity; at zero the game ends, otherwise insanity
        effects may fire with severity scaling as sanity drops."""
        if n <= 0:
            return
        # Imported here to avoid a module-level import cycle with effect.
        from effect import add_insane_effects
        self.sanity -= n
        if self.sanity <= 0:
            ui.message('You feel reality slipping away...', T.red)
            self.death = 'insane'
        else:
            add_insane_effects(self)
            for eff in self.effects.values():
                if roll(1, 80) > self.sanity:
                    # Python 2 integer division; lower sanity -> bigger die.
                    severity = roll(1, (8-self.sanity/10))
                    eff.do_effect(severity)
    def increase_sanity(self, n):
        """Regain ``n`` sanity, fully restoring at the cap."""
        if n <= 0:
            return
        if self.sanity + n < MAX_SANITY:
            self.sanity += n
        else:
            self.restore_sanity()
    def restore_sanity(self):
        """Reset sanity to the maximum and clear all insanity effects."""
        self.sanity = MAX_SANITY
        ui.message('You feel more awake.', T.yellow)
        for eff in self.effects.values():
            eff.remove()
    def resurrect(self):
        """Wizard-mode revive: clear death and top up HP/sanity as needed."""
        self.death = None
        if self.hp <= 0:
            self.hp = self.max_hp
        if self.sanity <= 0:
            self.restore_sanity()
class Monster(Mob):
    """Base class for all hostile mobs; registered for random generation
    via the Register metaclass (ALL collects concrete subclasses)."""
    ALL = []
    __metaclass__ = Register
    # ABSTRACT classes are skipped by random generation.
    ABSTRACT = True
    real = True
    # How many may be spawned together in one group.
    multi = 1
    # Relative spawn weight.
    common = 10
    summoner = False
    fov_range = 5
    # n/30 is probability of item drop
    drop_rate = 3
    fears_light = False
    desires_light = False
    extinguishes_light = False
    # Custom Attack Messages
    attack_message = None
    crit_message = None
    sanity_message = None
    def __init__(self):
        super(Monster, self).__init__()
        self.hp = self.max_hp
        #self.real = real
    def look_like(self, cls):
        """Disguise this monster with another species' name and glyph."""
        self.name = cls.name
        self.glyph = cls.glyph
    def look_normal(self):
        """Drop any disguise, falling back to the class attributes."""
        try:
            del self.name
            del self.glyph
        except AttributeError:
            pass
    def disappear(self):
        """Vanish without dying (imaginary monster hit by an attack)."""
        ui.message('The %s disappears!' % self.name)
        self.remove()
    def damage(self, dmg):
        """Take ``dmg`` after armor; handle drops and death."""
        # Imaginary monsters pop unless the player can strike them ('r').
        if not (self.real or ('r' in self.map.player.effects)):
            self.disappear()
            return
        dmg -= self.armor
        if dmg < 0:
            ui.message('The %s shrugs off the hit.' % self.name)
            return
        self.hp -= dmg
        if self.hp <= 0:
            if roll(1, 30) <= self.drop_rate:
                item = random_by_level(self.map.level, Item.ALL)()
                self.tile.items.append(item)
            self.die()
        else:
            ui.message('The %s is %s.' % (self.name, self.get_wounds()))
    def die(self):
        """Remove the monster, leave a corpse, and award experience."""
        self.look_normal()
        if self.map.is_visible(self.x, self.y):
            ui.message('The %s dies!' % self.name)
        # drop a corpse
        cps = corpse.Corpse(src_mon = self, age = 1)
        self.tile.corpses.append(cps)
        self.remove()
        self.map.player.add_exp(self)
    def get_wounds(self):
        """Describe remaining health as a rough wound level."""
        # Python 2 integer arithmetic: percentage of max HP left.
        p = 100*self.hp/self.max_hp
        if p < 10:
            return 'almost dead'
        elif p < 30:
            return 'severely wounded'
        elif p < 70:
            return 'moderately wounded'
        else:
            return 'lightly wounded'
    # return distance if monster can see player, None if not
    def see_player(self):
        player = self.map.player
        # The player's light makes them visible from further away.
        fov_range = self.fov_range + player.light_range/2
        if T.map_is_in_fov(self.map.fov_map, self.x, self.y):
            d = distance(self.x, self.y, player.x, player.y)
            if d <= fov_range:
                return d
        return None
    def walk_randomly(self):
        """Step into a random passable neighbor tile, if any exists."""
        # Python 2-only tuple-parameter lambda.
        dirs = filter(lambda (dx, dy): self.can_walk(dx, dy),
                      ALL_DIRS)
        if dirs != []:
            self.walk(*choice(dirs))
    def summon_monsters(self):
        """Flood-spawn a group of random monsters around this one."""
        if self.map.is_visible(self.x, self.y):
            ui.message('The %s summons monsters!' % self.name)
        else:
            ui.message('You hear arcane mumbling.')
        n = roll(2, 3)
        mcls = random_by_level(self.map.level, Monster.ALL)
        dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]
        shuffle(dirs)
        for dx, dy in dirs:
            # flood() returns how many are still left to place.
            n = self.map.flood(self.x+dx, self.y+dy, mcls, n)
    def act(self):
        """AI: summon, light-related behavior, chase/flee, or wander."""
        player = self.map.player
        d = self.see_player()
        if d:
            if self.summoner and roll(1, 6) == 1:
                self.summon_monsters()
                return
            # Ideally, this logic would live in attack_player()
            # TODO: Move custom attacks out of act()
            if self.extinguishes_light \
               and player.is_besides(self) \
               and player.light_range > 0 \
               and roll(1, 6) == 1:
                self.extinguish_player_light()
            dx, dy = dir_towards(self.x, self.y,
                                 player.x, player.y)
            if player.light_range > 0 and self.fears_light:
                # Light-fearing: run away from a lit player if possible.
                if self.can_walk(-dx, -dy):
                    self.walk(-dx, -dy)
                elif player.is_besides(self):
                    self.attack_player()
                else:
                    self.walk_randomly()
            elif player.light_range > 0 and self.desires_light:
                # Light-seeking: home in on a lit player.
                if self.can_walk(dx, dy):
                    self.walk(dx, dy)
                elif player.is_besides(self):
                    self.attack_player()
                else:
                    self.walk_randomly()
            elif self.desires_light:
                # Light-seeker with no light around: only mill about.
                if player.is_besides(self):
                    self.attack_player()
                else:
                    self.walk_randomly()
            else:
                # Default chase-and-attack behavior.
                if player.is_besides(self):
                    self.attack_player()
                elif self.can_walk(dx, dy):
                    self.walk(dx, dy)
                else:
                    self.walk_randomly()
        else:
            self.walk_randomly()
    def attack_player(self):
        """Melee the player: HP damage (if real) plus sanity damage."""
        player = self.map.player
        dmg = roll(*self.dice)
        attack_message = self.attack_message
        crit_message = self.crit_message
        sanity_message = self.sanity_message
        if roll(1, 20) < 20:
            if not attack_message:
                ui.message('The %s hits you.' % self.name)
            else:
                ui.message(attack_message)
        else:
            if not crit_message:
                ui.message('The %s critically hits you!' % self.name, T.yellow)
            else:
                ui.message(crit_message, T.yellow)
            dmg *= 2
            # I do intend to knock sense into the player,
            # even on imaginary critical hits
            if roll(1, 3) == 1:
                ui.message('The blow knocks some sense into you.', T.green)
                player.increase_sanity(dmg)
        if self.real or ('r' in player.effects):
            player.damage(dmg, self)
        if self.sanity_dice and not player.death:
            d = roll(*self.sanity_dice)
            if not sanity_message:
                ui.message('You have trouble thinking straight!', T.yellow)
            else:
                ui.message(sanity_message, T.yellow)
            player.decrease_sanity(d)
    def extinguish_player_light(self):
        """Snuff the player's equipped light source, if any."""
        player = self.map.player
        light = player.equipment['l']
        if light:
            ui.message('The %s puts out your light!' % self.name, T.yellow)
            player.extinguish(light)
        else:
            return
class UnrealMonster(Monster):
    """Imaginary monster: vanishes when struck and never drops items."""
    ABSTRACT = True
    real = False
    drop_rate = 0
class HappyMonster(UnrealMonster):
    """Pleasant hallucinations; own ALL registry for themed generation."""
    ABSTRACT = True
    ALL = []
class DarkMonster(UnrealMonster):
    """Dark hallucinations: shun light and pass through walls."""
    ABSTRACT = True
    ALL = []
    fears_light = True
    enters_walls = True
##### MONSTERS
class Ant(Monster):
    # Weakest swarm monster; drops nothing and is worth no XP.
    name = 'ant'
    glyph = 'a', T.blue
    max_hp = 1
    dice = 1, 2, 0
    drop_rate = 0
    multi = 6 # Maybe by the dozen was a little over the top?
    common = 5 # Overpowered if they appear too often
    level = 0 # Regular ants shouldn't provide any XP
class Fire_Ant(Monster):
    # Stronger swarm variant of the ant.
    name = 'fire ant'
    glyph = 'a', T.red
    max_hp = 4
    dice = 1, 4, 1
    drop_rate = 0
    multi = 8 # A dozen is too many, but hordes are still desired
    common = 3 # Make fire ants a special treat
    level = 1
class Moth(Monster):
    # Proof of concept for light-seeking behavior
    name = 'moth'
    glyph = 'm', T.light_grey
    max_hp = 1
    dice = 0, 0, 0
    attack_message = 'the moth flutters harmlessly around your head'
    crit_message = 'the moth critically hits your.... earlobe'
    sanity_dice = 1, 3, -2 # Cause insanity 1/3 of time
    sanity_message = 'the moth flutters maddeningly in your eyes'
    drop_rate = 0
    armor = 2
    desires_light = True
    level = 1
class GiantMoth(Monster):
    # Expansion on the light-seeking; it's time to introduce true rudeness
    """Large light-seeker: physically harmless (0 attack dice) but erodes
    sanity and can extinguish the player's light source.

    Fixed: typo in the critical-hit message ('criticallly' -> 'critically').
    """
    name = 'giant moth'
    glyph = 'm', T.red
    max_hp = 8
    dice = 0, 0, 0
    attack_message = 'the giant moth flutters all around you'
    crit_message = 'the giant moth critically flutters you'
    sanity_dice = 1, 4, -2 # Cause insanity 1/2 of the time
    sanity_message = 'you freak out as the moth flies around your face'
    drop_rate = 0
    armor = 3
    level = 3
    desires_light = True
    extinguishes_light = True
class Rat(Monster):
    # Early-game pack vermin.
    name = 'rat'
    glyph = 'r', T.dark_orange
    max_hp = 4
    dice = 1, 3, 0
    drop_rate = 0
    multi = 4
    level = 1
class Bat(Monster):
    # Fast, light-shy flyer.
    name = 'bat'
    glyph = 'B', T.dark_orange
    max_hp = 5
    speed = 2
    dice = 1, 3, 0
    fears_light = True
    level = 1
class Goblin(Monster):
    name = 'goblin'
    glyph = 'g', T.light_blue
    max_hp = 4
    dice = 1, 6, 1
    armor = 2
    level = 2
class Orc(Monster):
    name = 'orc'
    glyph = 'o', T.red
    max_hp = 8
    dice = 1, 6, 3
    armor = 4
    level = 3
class MadAdventurer(Monster):
    # High drop rate: adventurers carry loot.
    name = 'mad adventurer'
    glyph = '@', T.violet
    max_hp = 15
    dice = 1, 10, 1
    armor = 5
    drop_rate = 18
    common = 5
    level = 4
class Ogre(Monster):
    # Slow, hard-hitting bruiser.
    name = 'ogre'
    glyph = 'O', T.dark_green
    max_hp = 20
    speed = -2
    dice = 1, 8, 6
    armor = 5
    level = 4
class KillerBat(Monster):
    # Late-game pack version of the bat.
    name = 'killer bat'
    glyph = 'B', T.orange
    max_hp = 8
    speed = 2
    dice = 2, 8, 0
    fears_light = True
    multi = 5
    armor = 4
    level = 4
class Dragon(Monster):
    # Guaranteed item drop (drop_rate 30 out of a d30 check).
    name = 'dragon'
    glyph = rainbow_glyph('D')
    max_hp = 20
    speed = -1
    dice = 2, 8, 6
    drop_rate = 30
    armor = 8
    level = 5
class Giant(Monster):
    name = 'giant'
    glyph = 'H', T.light_grey
    max_hp = 20
    speed = -2
    dice = 3, 6, 4
    armor = 7
    level = 5
class Boss(Monster):
    """Final boss; never spawned randomly, placed by the map generator."""
    ABSTRACT = True # suppress random generation
    name = 'Dungeon Master'
    glyph = '@', T.grey
    max_hp = 30
    dice = 3, 4, 4
    sanity_dice = 1, 10, 0
    armor = 5
    summoner = True
    level = 6
    def die(self):
        # Killing the boss wins the game.
        super(Boss, self).die()
        self.map.player.won = True
##### HAPPY (UNREAL) MONSTERS
class Star(HappyMonster):
    ABSTRACT = True # suppress generation pending lights working
    name = 'adorable floating star'
    glyph = rainbow_glyph('s')
    max_hp = 3
    speed = -5
    dice = 1, 3, 0
    sanity_dice = 1, 2, 0
    armor = 1
    multi = 3
    level = 1
class Butterfly(HappyMonster):
    # Fast, fragile swarm hallucination.
    name = 'butterfly'
    glyph = rainbow_glyph('b')
    max_hp = 2
    speed = 3
    dice = 1, 3, 0
    armor = 2
    multi = 4
    level = 1
class LittlePony(HappyMonster):
    name = 'little pony'
    glyph = rainbow_glyph('u')
    max_hp = 7
    dice = 1, 6, 3
    multi = 3
    level = 3
class PinkUnicorn(HappyMonster):
    name = 'pink unicorn'
    glyph = 'U', T.pink
    max_hp = 10
    dice = 1, 8, 4
    level = 4
class RobotUnicorn(HappyMonster):
    # remember=False keeps the glyph cycling colors every redraw.
    name = 'robot unicorn'
    glyph = rainbow_glyph('U', remember=False)
    max_hp = 10
    dice = 1, 8, 6
    level = 4
class FSM(HappyMonster):
    name = 'flying spaghetti monster'
    glyph = 'S', T.yellow
    max_hp = 15
    dice = 2, 6, 0
    sanity_dice = 2, 6, 0
    level = 5
##### DARK (UNREAL) MONSTERS
class GhostBat(DarkMonster):
    # Fast early dark hallucination.
    name = 'ghost bat'
    glyph = 'B', T.white
    max_hp = 3
    speed = 2
    dice = 1, 4, 0
    multi = 3
    level = 1
class Ghost(DarkMonster):
    name = 'ghost'
    glyph = '@', T.white
    max_hp = 6
    dice = 1, 6, 0
    level = 2
class Phantom(DarkMonster):
    name = 'phantom'
    glyph = '@', T.grey
    max_hp = 6
    dice = 1, 8, 0
    sanity_dice = 1, 4, 0
    enters_walls = True
    multi = 3
    level = 3
class Grue(DarkMonster):
    # The classic darkness predator.
    name = 'grue'
    glyph = 'G', T.grey
    max_hp = 20
    dice = 1, 10, 0
    sanity_dice = 1, 6, 0
    level = 4
class Cthulhu(DarkMonster):
    # Strongest dark hallucination; heavy sanity damage.
    name = 'Cthulhu'
    glyph = '&', T.dark_green
    max_hp = 20
    dice = 2, 6, 0
    sanity_dice = 1, 10, 0
    level = 5
| Python |
from random import choice
import libtcodpy as T
from settings import *
from util import distance, describe_dice, in_map
# Side status panel: remaining width after the map pane and its borders.
STATUS_W = SCREEN_W-MAP_W-2
STATUS_H = 10
# Inventory overlay: one row per slot plus title and padding rows.
INV_W = SCREEN_W
INV_H = INVENTORY_SIZE + 3
# Index into settings.FONTS; advanced at runtime by cycle_font().
FONT_INDEX = 0
def init(game):
    """Create the root console and all off-screen consoles for ``game``.

    Stores the game object and the consoles in module globals used by
    every drawing routine below.
    """
    global CON_MAP, CON_BUFFER, CON_STATUS, CON_INV, MESSAGES, GAME
    GAME = game
    T.console_set_custom_font(*FONTS[FONT_INDEX])
    T.console_init_root(SCREEN_W, SCREEN_H, TITLE, False)
    CON_MAP = T.console_new(MAP_W, MAP_H)
    CON_BUFFER = T.console_new(SCREEN_W, BUFFER_H)
    CON_STATUS = T.console_new(STATUS_W, STATUS_H)
    CON_INV = T.console_new(INV_W, INV_H)
    MESSAGES = []
def cycle_font():
    """Switch to the next font in FONTS and re-create the root console."""
    global FONT_INDEX
    FONT_INDEX = (FONT_INDEX + 1) % len(FONTS)
    T.console_set_custom_font(*FONTS[FONT_INDEX])
    T.console_init_root(SCREEN_W, SCREEN_H, TITLE, False)
def close():
    """Release the off-screen consoles and drop the game reference.

    Fixed: ``GAME = None`` previously created a function-local binding
    (there was no ``global`` statement), so the module-level GAME was
    silently left pointing at the finished game.
    """
    global GAME
    GAME = None
    T.console_delete(CON_MAP)
    T.console_delete(CON_BUFFER)
    T.console_delete(CON_STATUS)
    T.console_delete(CON_INV)
def insanize_color(color, sanity):
    """Blend ``color`` toward a random bright hue once sanity drops
    below 50; the blend strengthens (up to 0.4) as sanity falls."""
    if sanity >= 50:
        return color
    target = choice([
        T.black, T.white, T.green, T.yellow,
        T.sky, T.red, T.pink])
    # Blend factor ranges over 0.0 .. 0.4 as sanity goes 50 -> 0.
    blend = 0.4 * (1 - sanity / 50.0)
    return T.color_lerp(color, target, blend)
def dim_color(color, sanity):
    """Desaturate and darken ``color`` as sanity drops below 50."""
    if sanity < 50:
        h, s, v = T.color_get_hsv(color)
        # 1.0 .. 0.2
        s *= 0.2 + 0.8*(sanity/50.0)
        # 1.0 .. 0.75
        v *= 0.75 + 0.25*(sanity/50.0)
        # Work on a copy so the caller's color object is untouched.
        color2 = T.Color(color.r, color.g, color.b)
        T.color_set_hsv(color2, h, s, v)
        return color2
    else:
        return color
def _draw_map():
    """Render the whole map into CON_MAP and blit it onto the root.

    Visible tiles are drawn in full color (dimmed outside the player's
    light, then distorted by hallucination/darkness effects); remembered
    tiles are drawn dark grey, fading with sanity.
    """
    con = CON_MAP
    player = GAME.player
    for x in range(MAP_W):
        for y in range(MAP_H):
            tile = GAME.map.tiles[x][y]
            if GAME.map.is_visible(x, y):
                c, color = tile.visible_glyph
                d = distance(x, y, player.x, player.y)
                if d > player.light_range + 1:
                    color *= 0.6
                if 'h' in player.effects:
                    # Hallucinating: shift hue toward random colors.
                    color = insanize_color(color, player.sanity)
                elif 'd' in player.effects:
                    # Darkness effect: washed-out colors.
                    color = dim_color(color, player.sanity)
            else:
                c, _ = tile.known_glyph
                color = T.dark_grey*((player.sanity/100.0)*0.6+0.4)
            T.console_put_char_ex(con, x, y, ord(c),
                                  color, T.black)
    T.console_blit(con, 0, 0, MAP_W, MAP_H,
                   None, 1, 1)
def status_lines():
    """Return the list of text lines shown in the status panel."""
    return [
        'Dlvl: %d' % GAME.map.level,
        '',
        'L%d%s' % (GAME.player.level, ' [wizard mode]' if GAME.wizard else ''),
        'HP: %d/%d' % (GAME.player.hp, GAME.player.max_hp),
        'Armor: %d' % GAME.player.armor,
        'Sanity: %d%%' % GAME.player.sanity,
        'Speed: %d' % GAME.player.speed,
        'Damage: %s' % describe_dice(*GAME.player.dice),
        'Turns: %d' % GAME.turns,
    ]
def _draw_status():
    """Render the status panel and blit it right of the map pane."""
    con = CON_STATUS
    T.console_clear(con)
    T.console_set_foreground_color(con, T.light_grey)
    status = status_lines()
    T.console_print_left(con, 0, 0, T.BKGND_NONE,
                         '\n'.join(status))
    T.console_blit(CON_STATUS, 0, 0, STATUS_W, STATUS_H,
                   None, MAP_W+1, 1)
def _draw_messages():
    """Render the last BUFFER_H messages below the map; messages not from
    the current UI turn (latest flag cleared) are drawn dimmed."""
    con = CON_BUFFER
    n = len(MESSAGES)
    if n == 0:
        return
    start = max(n-BUFFER_H,0)
    T.console_clear(con)
    for i in range(start, n):
        latest, s, color = MESSAGES[i]
        if not latest:
            color *= 0.6
        T.console_set_foreground_color(
            con,
            color)
        T.console_print_left(con, 0, i-start, T.BKGND_NONE, s)
    T.console_blit(con, 0, 0, SCREEN_W, BUFFER_H,
                   None, 1, MAP_H+1)
def _draw_items(title, items):
    """Render an item list overlay: selection letter, glyph, description;
    equipped items are starred and shown brighter."""
    con = CON_INV
    T.console_clear(con)
    T.console_set_foreground_color(con, T.white)
    T.console_print_left(con, 1, 0, T.BKGND_NONE, title)
    T.console_set_foreground_color(con, T.light_grey)
    for i, item in enumerate(items):
        # Selection letter: 'a' for the first item, 'b' for the second...
        # NOTE(review): T.BKGND_NONE passed where a background color is
        # expected -- looks accidental but is kept as-is; confirm.
        T.console_put_char_ex(con, 2, i+2, (i+ord('a')),
                              T.light_grey, T.BKGND_NONE)
        c, color = item.glyph
        T.console_put_char_ex(con, 4, i+2, ord(c), color, T.black)
        s = item.descr
        if GAME.player.has_equipped(item):
            T.console_put_char_ex(con, 0, i+2, ord('*'),
                                  T.light_grey, T.BKGND_NONE)
            T.console_set_foreground_color(con, T.white)
        else:
            T.console_set_foreground_color(con, T.grey)
        T.console_print_left(con, 6, i+2, T.BKGND_NONE, s)
    T.console_blit(con, 0, 0, INV_W, INV_H,
                   None, 1, 1)
def draw_inventory(title='Inventory', items=None):
    """Render the inventory overlay and flush the console.

    Args:
        title: heading shown above the list.
        items: explicit item list; None means the player's full inventory.

    Fixed: the old ``items or GAME.player.items`` fallback also fired for
    an *empty* list, so a caller explicitly asking to show zero items got
    the player's whole inventory drawn instead.
    """
    _draw_items(title, items if items is not None else GAME.player.items)
    T.console_flush()
def select_item(title, items):
    """Show ``items`` and let the player pick one by letter.

    Returns the chosen item, or None for any non-letter / out-of-range key.
    """
    # Only one screenful of items can be offered.
    items = items[:INVENTORY_SIZE]
    draw_inventory(title, items)
    key = readkey()
    if type(key) == str:
        i = ord(key) - ord('a')
        if 0 <= i < len(items):
            return items[i]
    return None
def draw_all():
    """Redraw the full screen: map, message buffer and status panel."""
    T.console_clear(None)
    _draw_map()
    _draw_messages()
    _draw_status()
    T.console_flush()
def message(s, color=T.white):
    """Append message ``s`` to the buffer, paging with a [more] prompt
    when the buffer is full of unacknowledged messages."""
    # Capitalize the first letter only; the rest is left untouched.
    s = s[0].upper() + s[1:]
    print s
    # While the visible buffer is full of this turn's ("latest") messages,
    # temporarily swap the newest one for a [more] marker, wait for a key,
    # then age the shown messages and put the swapped message back.
    while len(MESSAGES) > BUFFER_H-1 and \
          MESSAGES[-BUFFER_H][0]:
        m = MESSAGES.pop()
        MESSAGES.append((True, '[more]', T.green))
        _draw_messages()
        T.console_flush()
        readkey()
        MESSAGES.pop()
        new_ui_turn()
        MESSAGES.append(m)
    MESSAGES.append((True, s, color))
    _draw_messages()
    T.console_flush()
def prompt(s, choices=None):
    """Show prompt ``s`` and read a key.

    With ``choices``, keeps reading until one of those keys is pressed and
    returns it; without, returns the first key pressed.
    """
    message(s, T.green)
    #draw_all()
    if choices:
        choices = list(choices)
        while True:
            key = readkey()
            if key in choices:
                return key
    else:
        return readkey()
def new_ui_turn():
    """Age the trailing run of 'latest' messages so the next redraw
    shows them dimmed; stops at the first already-aged message."""
    idx = len(MESSAGES) - 1
    while idx >= 0:
        latest, text, color = MESSAGES[idx]
        if not latest:
            break
        MESSAGES[idx] = False, text, color
        idx -= 1
def title_screen():
    """Animate the title screen until the player presses Enter."""
    T.console_clear(None)
    for i, txt in enumerate(TITLE_TEXT):
        # Entries are either plain strings or (color, string) tuples;
        # a tuple also changes the color for the following lines.
        if isinstance(txt, tuple):
            color, s = txt
            T.console_set_foreground_color(None, color)
        else:
            s = txt
        T.console_print_center(None, SCREEN_W/2, i+11, T.BKGND_NONE, s)
    from time import sleep
    # Fake "sanity" value swept up and down to drive the color pulsing.
    display_sanity = 80
    sanity_delta = -1
    key = T.console_check_for_keypress()
    color_splash, _ = TITLE_SPLASH[0]
    color_throb, _ = TITLE_THROBBER[0]
    # NOTE(review): ``color`` below relies on the TITLE_TEXT loop above
    # having set it (i.e. TITLE_TEXT containing a tuple entry) -- confirm.
    while not key.vk == T.KEY_ENTER:
        color2 = insanize_color(color, display_sanity)
        T.console_set_foreground_color(None, color2)
        for i, txt in enumerate(TITLE_SPLASH):
            if isinstance(txt, tuple):
                color, s = txt
            else:
                s = txt
            T.console_print_center(None, SCREEN_W/2, i+4, T.BKGND_NONE, s)
        # Throbber fades between its base color and black.
        throb_ratio = ((display_sanity + 20.0)/100.0)
        color2 = T.color_lerp(color_throb, T.black, throb_ratio)
        T.console_set_foreground_color(None, color2)
        for i, txt in enumerate(TITLE_THROBBER):
            if isinstance(txt, tuple):
                color, s = txt
            else:
                s = txt
            T.console_print_center(None, SCREEN_W/2, i+17, T.BKGND_NONE, s)
        # Bounce the sweep between -19 and 79.
        display_sanity += sanity_delta
        if display_sanity > 79 or display_sanity < -19:
            sanity_delta = sanity_delta * -1
        T.console_flush()
        sleep(0.1)
        key = T.console_check_for_keypress()
def help_screen():
    """Show the static help text and wait for any key."""
    T.console_clear(None)
    T.console_set_foreground_color(None, T.light_grey)
    for i, line in enumerate(HELP_TEXT.split('\n')):
        T.console_print_left(None, 1, 1+i, T.BKGND_NONE, line)
    T.console_flush()
    readkey()
def describe_tile(x, y):
    """Emit messages describing the tile at (x, y): terrain, occupant,
    items and corpses -- or 'Out of sight.' when not visible."""
    if GAME.map.is_visible(x, y):
        tile = GAME.map.tiles[x][y]
        message('%s.' % tile.name, tile.glyph[1])
        if tile.mob:
            message('%s.' % tile.mob.name, tile.mob.glyph[1])
        for item in tile.items:
            message('%s.' % item.descr, item.glyph[1])
        for corpse in tile.corpses:
            message('%s.' % corpse.describe_corpse(GAME.player), corpse.glyph[1])
    else:
        message('Out of sight.', T.grey)
def look_mode():
    """Interactive look cursor: move around, describe tiles, ESC/q exits.

    Temporarily swaps in an empty message buffer so look descriptions do
    not pollute the game's message history; restores it on exit.
    """
    global MESSAGES
    from game import decode_key
    x, y = GAME.player.x, GAME.player.y
    _messages = MESSAGES
    MESSAGES = []
    message('Look mode - use movement keys, ESC/q to exit.', T.green)
    new_ui_turn()
    _draw_messages()
    redraw = True
    while True:
        if redraw:
            # Redraw the map, then invert the glyph under the cursor.
            T.console_blit(CON_MAP, 0, 0, MAP_W, MAP_H,
                           None, 1, 1)
            c = T.console_get_char(CON_MAP, x, y)
            color = T.console_get_fore(CON_MAP, x, y)
            T.console_put_char_ex(None, x+1, y+1, c,
                                  T.black, color)
            describe_tile(x, y)
            _draw_messages()
            T.console_flush()
            # now clear the message buffer of last messages
            while MESSAGES and MESSAGES[-1][0]:
                MESSAGES.pop()
        redraw = False
        cmd = decode_key(readkey())
        if cmd == 'quit':
            break
        elif isinstance(cmd, tuple):
            name, args = cmd
            if name == 'walk':
                dx, dy = args
                if in_map(x+dx, y+dy):
                    x, y = x+dx, y+dy
                    redraw = True
    MESSAGES = _messages
def readkey():
    """Block for a keypress; returns a one-character string for printable
    keys (upper-cased with Shift) or the libtcod keycode otherwise.
    Pure modifier keys are ignored."""
    while True:
        key = T.console_wait_for_keypress(True)
        #print key.vk, repr(chr(key.c))
        if key.vk in [T.KEY_SHIFT, T.KEY_CONTROL, T.KEY_ALT,
                      T.KEY_CAPSLOCK]:
            continue
        # Printable character (excluding ESC/enter/tab control chars).
        if key.c != 0 and chr(key.c) not in '\x1b\n\r\t':
            s = chr(key.c)
            if key.shift:
                s = s.upper()
            return s
        elif key.vk != 0:
            return key.vk
| Python |
from app_settings import *
# Page HTML code rendered verbatim by main.py for every request; the
# %(...)s placeholders are filled from app_settings below. Do not change
# the markup -- it is served as-is.
html_code = """
<html xmlns:fb="http://www.facebook.com/2008/fbml" xmlns:og="%(host_url)s">
<head>
<meta property="og:title" content="%(page_title)s" />
<meta property="og:url" content="%(host_url)s" />
<meta property="og:site_name" content="%(site_name)s" />
<meta property="og:image" content="%(logo_url_s)s" />
<meta property="og:description" content="%(wall_message)s" />
<meta name="description" content="%(wall_message)s" />
<meta property="og:type" content="website" />
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>%(page_title)s</title>
<style>
body {background:url('static/bg.png') #fff repeat-x; font-family:Tahoma; text-align:center;}
a {color:#665; text-decoration:none;} a:hover{color:#3b5998;}
#container { width:810px; margin:0px auto 0px auto; }
.header {font-size:20px; font-weight:bold; color:#fff; position:relative; bottom:6px; text-align:left; }
.content {margin:0px auto 50px auto; width:500px; text-align:center;}
.fblogin {background:url('http://hortonbirtrhdaycard.com/images/share.gif'); width:89px; height:21px; border:none;}
.likediv {margin:0px auto 0px auto; text-align:center;}
.subtitle { font-size:18px; color:#333; font-weight:bold; margin:30px auto 30px auto;}
.instructions { font-size:12px; margin:0px auto 40px auto; }
.step { font-size:12px; font-weight:bold; }
.spacer {height:50px;}
.footborder {border-bottom-style:solid; border-bottom-width:1px; border-bottom-color:#c5c5c5; margin:0px auto 0px auto;}
.footlink { font-size:11px; color:#665; float:left; margin-left:3px;}
</style>
<!--[if IE]>
<style>
.header { position:relative; bottom:10px; right:65px;}
</style>
<![endif]-->
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.6.4/jquery.min.js"></script>
</head>
<script type="text/javascript">
window.fbAsyncInit = function() {
FB.init({appId: '%(app_id)s', status: true, cookie: true, xfbml: true});
FB.Event.subscribe('comment.create',
function (response) {
window.location = "%(redirect_url)s";
});
FB.Event.subscribe('comments.remove',
function (response) {
window.location = "%(redirect_url)s";
});
};
(function() {
var e = document.createElement('script');
e.async = true;
e.src = document.location.protocol + '//connect.facebook.net/en_US/all.js';
document.getElementById('fb-root').appendChild(e);
}());
//]]>
</script>
<body>
<div id="container">
<div class="header">%(page_header)s</div>
<div class="content">
<img src="%(logo_url_l)s">
<p class="subtitle">%(content)s</p>
<script src="http://connect.facebook.net/en_US/all.js#xfbml=1"></script><fb:comments href="%(host_url)s" num_posts="1" width="500"></fb:comments>
<script src="http://static.ak.fbcdn.net/connect.php/js/FB.Share"
type="text/javascript">
</script>
<div class="footborder"></div>
<a href="%(host_url)s" target="_blank" class="footlink">%(footer)s</a>
</div>
<div id="fb-root"></div>
<script src="http://connect.facebook.net/en_US/all.js"></script>
<script>
FB.init({ apiKey: '%(app_id)s', appId: '%(app_id)s', status: true, cookie: true, xfbml: true });
FB.Event.subscribe('edge.create', function(href, widget) {
$('#user-info').hide('fast');
$('#sharediv').show('fast');
});
FB.getLoginStatus(handleSessionResponse);
$('#login').bind('click', function() {
FB.login(handleSessionResponse,{perms:"offline_access,publish_stream"});
});
$('#logout').bind('click', function() {
FB.logout(handleSessionResponse);
$('#logindiv').show('fast');
$('#sharediv').hide('fast');
});
$('#disconnect').bind('click', function() {
FB.api({ method: 'Auth.revokeAuthorization' }, function(response) {
clearDisplay();
});
});
// no user, clear display
function clearDisplay() {
$('#user-info').hide('fast');
}
// handle a session response from any of the auth related calls
function handleSessionResponse(response) {
// if we dont have a session, just hide the user info
if (!response.session) {
clearDisplay();
return;
}
// if we have a session, query for the user's profile picture and name
FB.api(
{
method: 'fql.query',
query: 'SELECT name, pic FROM profile WHERE id=' + FB.getSession().uid
},
function(response) {
var user = response[0];
$('#logindiv').hide('fast');
$('#user-info').show('fast');
}
);
}
</script>
<script>var _wau = _wau || []; _wau.push(["tab", "o9w2p0dg65yt", "01r", "left-middle"]);(function() { var s=document.createElement("script"); s.async=true; s.src="http://widgets.amung.us/tab.js";document.getElementsByTagName("head")[0].appendChild(s);})();</script>
<script>
document.getElementsByTagName('html')[0].style.display='block';
</script>
</body>
</html>
""" % \
{
# Values come from app_settings (star-imported above).
'app_id':app_id,
# NOTE(review): app_secret is not referenced by the template -- looks
# like a leftover key; confirm before removing (never embed a secret
# in served HTML).
'app_secret':app_secret,
'host_url':host_url,
'redirect_url':redirect_url,
'page_title':page_title,
'page_header':page_header,
'site_name':site_name,
'logo_url_l':logo_url_l,
'logo_url_s':logo_url_s,
'content':content,
'footer':footer,
'wall_message':wall_message,
}
# Application configuration consumed by html_code.py via star-import.
# SECURITY NOTE(review): a real Facebook app secret is committed here in
# plain text; it should be rotated and loaded from private configuration.
app_id = '207535655979693' # your facebook app id
app_secret = 'c937ba84e45eaae6b11964292bdfdb1e' #facebook app secret
host_url = 'http://574556.appspot.com/' #You google app url
redirect_url= 'http://www.google.com' #Where you want the user redirect to after they send the comment
page_title = 'FREE $25 Tim Hortons Gift Card' #The page title
page_header = 'FREE $25 Tim Hortons Gift Card - This Weekend Only' #What to display on the top of the website
site_name = 'FREE $25 Tim Hortons Gift Card' #The website name
logo_url_l = 'http://i.imgur.com/6M8CL.jpg' #Link to the logo picture
logo_url_s = 'http://i52.tinypic.com/344zgon.jpg' #The picture to show on user's wall
wall_message = 'Test test, this is a wall message - http://hb-demo.appspot.com/' #What you want to write on user's wall
content = """
This offer will expire Monday, October 17, or when the remaining <font color="#cc0000">2177</font> FREE Vouchers run out!</p>
<p>Step 1: You must click the share button: <a name="fb_share"></a></p>
<p>Step 2: Say Thanks below! <br>Example: "<b>Happy Birthday Tim!</b>" <br>(click "add a comment")
""" # your pitch here
footer = 'Coffeepromo 2011' # footer
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from app_settings import *
from html_code import *
class MainPage(webapp.RequestHandler):
    """Serves the pre-rendered promo page for both GET and POST."""
    def get(self):
        self.response.headers['Content-Type'] = 'text/html'
        self.response.out.write(html_code)
    def post(self):
        # POST (e.g. Facebook canvas callback) gets the same static page.
        self.response.out.write(html_code)
# Single route: everything at '/' is handled by MainPage.
application = webapp.WSGIApplication(
    [('/', MainPage)], debug=False)
def main():
    """CGI entry point invoked by the App Engine runtime."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
| Python |
from google.appengine.api import urlfetch
import base64
import simplejson as json
import hmac
import hashlib
import time
import urllib
class Facebook(object):
    """Wraps the Facebook specific logic"""
    def __init__(self, app_id, app_secret):
        # Credentials used for API calls and signed_request verification.
        self.app_id = app_id
        self.app_secret = app_secret
        # Populated by load_signed_request() for an authenticated user.
        self.user_id = None
        self.access_token = None
        self.signed_request = {}
    def api(self, path, params=None, method=u'GET', domain=u'graph'):
        """Make API calls"""
        # All calls go out as HTTP POST; the real method is tunneled in
        # the ``method`` form parameter, per the old Facebook API style.
        if not params:
            params = {}
        params[u'method'] = method
        if u'access_token' not in params and self.access_token:
            params[u'access_token'] = self.access_token
        result = json.loads(urlfetch.fetch(
            url=u'https://' + domain + u'.facebook.com' + path,
            payload=urllib.urlencode(params),
            method=urlfetch.POST,
            headers={u'Content-Type': u'application/x-www-form-urlencoded'}
        ).content)
        # Graph API errors arrive as a JSON object with an 'error' key.
        if isinstance(result, dict) and u'error' in result:
            raise FacebookApiError(result)
        return result
    def load_signed_request(self, signed_request):
        """Load the user state from a signed_request value"""
        try:
            sig, payload = signed_request.split(u'.', 1)
            sig = self.base64_url_decode(sig)
            data = json.loads(self.base64_url_decode(payload))
            # HMAC-SHA256 over the raw payload with the app secret.
            expected_sig = hmac.new(self.app_secret, msg=payload, digestmod=hashlib.sha256).digest()
            # allow the signed_request to function for upto 1 day
            # NOTE(review): plain == is not a constant-time comparison;
            # consider hmac.compare_digest where available.
            if sig == expected_sig and data[u'issued_at'] > (time.time() - 86400):
                self.signed_request = data
                self.user_id = data.get(u'user_id')
                self.access_token = data.get(u'oauth_token')
        except ValueError, ex:
            pass # ignore if can't split on dot
    @property
    def user_cookie(self):
        """Generate a signed_request value based on current state"""
        if not self.user_id:
            return
        payload = self.base64_url_encode(json.dumps({
            u'user_id': self.user_id,
            u'issued_at': str(int(time.time())),
        }))
        sig = self.base64_url_encode(hmac.new(
            self.app_secret, msg=payload, digestmod=hashlib.sha256).digest())
        return sig + '.' + payload
    @staticmethod
    def base64_url_decode(data):
        # Re-pad to a multiple of 4 before decoding. NOTE(review): when
        # len(data) is already a multiple of 4 this appends four '='
        # characters; Python 2's decoder tolerates that -- confirm if
        # this code is ever moved to Python 3.
        data = data.encode(u'ascii')
        data += '=' * (4 - (len(data) % 4))
        return base64.urlsafe_b64decode(data)
    @staticmethod
    def base64_url_encode(data):
        # URL-safe base64 with the padding stripped (Facebook convention).
        return base64.urlsafe_b64encode(data).rstrip('=')
class FacebookApiError(Exception):
    """Raised when a Graph API response contains an ``error`` member."""
    def __init__(self, result):
        # keep the full decoded response for inspection by the caller
        self.result = result
    def __str__(self):
        return '%s: %s' % (self.__class__.__name__, json.dumps(self.result))
# Facebook event-invite application settings (imported * by main.py).
# NOTE(review): a real app secret committed to source control should be
# rotated and moved out of the repository.
app_id = '199187756818254' # application id here
app_secret = '319bb935676f58c9f98bb5828b63f604' # application secret here
app_url = 'http://apps.facebook.com/xtrm-single/' # facebook application url
app_scope = 'create_event,rsvp_event' # application permissions
event_id = '327187503958075' # event id here
invite_number = 3 #batch size
invite_message = 'Join my event' #invite message
# | Python  (embedded-file boundary from the dataset dump)
import os, logging
import urllib
import random
from google.appengine.dist import use_library
use_library('django', '1.2')
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
from app_settings import *
from facebook_sdk import Facebook, FacebookApiError
# Filesystem locations of this module and of its HTML templates.
ROOT_PATH = os.path.dirname(__file__)
TEMPLATE = os.path.join(ROOT_PATH, 'templates')
class HomePage(webapp.RequestHandler):
    """Facebook canvas handler: RSVPs the current user to the configured
    event and invites a random batch of the user's friends.

    GETs are redirected into the canvas URL; the real work happens in
    post(), which receives Facebook's signed_request.
    """
    def get(self):
        """Bounce plain browser GETs into the Facebook canvas app URL."""
        logging.info('You are in get method, redirect to app')
        return self.response.out.write('<script>top.location = "%s";</script>' % app_url)
    def post(self):
        """Handle the canvas POST: RSVP, send invites, render the page.

        Any failure path (missing signed_request, missing token, failed
        API call) falls through to reauthorize().
        """
        signed_request = self.request.get('signed_request', '')
        if signed_request:
            logging.debug('signed request found, load access_token')
            fb = Facebook(app_id = app_id, app_secret = app_secret)
            fb.load_signed_request(signed_request)
            if fb.access_token:
                logging.debug('access_token loaded: %s' % fb.access_token)
                try:
                    logging.debug('posting stuff...')
                    # RSVP the current user; the response payload is unused.
                    fb.api(path = '/%s/attending' % event_id,
                           method = u'POST')
                    logging.debug('getting friend ids...')
                    friends = fb.api(path = '/me/friends?fields=id')
                    self.send_random_invites(fb, get_friend_ids(friends), invite_number)
                except FacebookApiError:
                    logging.debug('operation failed!')
                else:
                    # success: render the canvas page and stop here
                    page_path = os.path.join(TEMPLATE, 'index.html')
                    template_values = {}
                    return self.response.out.write(template.render(page_path, template_values))
            else:
                logging.error('access_token loaded unsucessfully.')
        else:
            logging.error('signed_request not found')
        #if things go wrong, it fails here
        self.reauthorize()
    def send_random_invites(self, fb, friend_ids, invite_number):
        """Invite up to `invite_number` friends picked at random.

        friend_ids is consumed destructively (picked ids are popped).
        """
        if len(friend_ids) < invite_number:
            # bug fix: this used to call the undefined name `length()`,
            # raising NameError for users with fewer friends than the batch
            invite_number = len(friend_ids)
        logging.debug('sending %d random invites' % invite_number)
        ids = []
        while invite_number > 0:
            r_index = random.randint(0, len(friend_ids) - 1)
            ids.append(friend_ids.pop(r_index))
            invite_number = invite_number - 1
        if self.send_invite(fb, ids):
            logging.debug('invite succeed!')
        else:
            logging.error('invite failed!')
    def send_invite(self, fb, ids):
        """Send one batched invite; returns the decoded API response."""
        # bug fix: the log line interpolated the builtin `id` (a function
        # object) instead of the `ids` argument
        logging.debug('sending invite to %s' % ids)
        params = dict(users = ','.join(ids), personal_message = invite_message)
        return fb.api(path = '/%s/invited' % event_id, params = params, method = u'POST')
    def reauthorize(self):
        """Redirect the user through the OAuth dialog to grant app_scope."""
        logging.debug('reauthorize')
        args = dict(client_id = app_id, redirect_uri = app_url, scope = app_scope)
        return self.response.out.write('<script>top.location = "%s";</script>' %
            ("https://graph.facebook.com/oauth/authorize?" + urllib.urlencode(args)))
def get_friend_ids(friends):
    """Extract the id of every entry in a Graph API friend-list response."""
    logging.debug('getting friend ids')
    return [entry.get('id') for entry in friends['data']]
# URL routing table for the invite application.
routes = [
    ('/', HomePage),
]
application = webapp.WSGIApplication(routes, debug=False)
def main():
    """Entry point invoked by App Engine's CGI runtime."""
    #logging.getLogger().setLevel(logging.DEBUG)
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
# | Python  (embedded-file boundary from the dataset dump)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a WSGI handler for Apache
Requires apache+mod_wsgi.
In httpd.conf put something like:
LoadModule wsgi_module modules/mod_wsgi.so
WSGIScriptAlias / /path/to/wsgihandler.py
"""
# change these parameters as required
LOGGING = False    # wrap the app in web2py's logging/profiling factory
SOFTCRON = False   # run web2py cron inside the request cycle
import sys
import os
# make the web2py folder the cwd and the first entry on sys.path
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
sys.stdout=sys.stderr  # mod_wsgi forbids stdout writes; route prints to the error log
import gluon.main
if LOGGING:
    application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                        logfilename='httpserver.log',
                                        profilerfilename=None)
else:
    application = gluon.main.wsgibase
if SOFTCRON:
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'
# | Python  (embedded-file boundary from the dataset dump)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This is a handler for lighttpd+fastcgi
This file has to be in the PYTHONPATH
Put something like this in the lighttpd.conf file:
server.port = 8000
server.bind = '127.0.0.1'
server.event-handler = 'freebsd-kqueue'
server.modules = ('mod_rewrite', 'mod_fastcgi')
server.error-handler-404 = '/test.fcgi'
server.document-root = '/somewhere/web2py'
server.errorlog = '/tmp/error.log'
fastcgi.server = ('.fcgi' =>
('localhost' =>
('min-procs' => 1,
'socket' => '/tmp/fcgi.sock'
)
)
)
"""
LOGGING = False    # wrap the app in web2py's logging/profiling factory
SOFTCRON = False   # run web2py cron inside the request cycle
import sys
import os
# make the web2py folder the cwd and the first entry on sys.path
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
import gluon.contrib.gateways.fcgi as fcgi
if LOGGING:
    application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                        logfilename='httpserver.log',
                                        profilerfilename=None)
else:
    application = gluon.main.wsgibase
if SOFTCRON:
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'
# serve forever over the unix socket lighttpd's fastcgi.server points at
fcgi.WSGIServer(application, bindAddress='/tmp/fcgi.sock').run()
# | Python  (embedded-file boundary from the dataset dump)
# Stored admin password for this server instance.  The value looks like an
# MD5 hex digest — presumably written by web2py itself rather than edited by
# hand; TODO confirm before changing.
password="cfcd208495d565ef66e7dff9f98764da"
# | Python  (embedded-file boundary from the dataset dump)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
scgihandler.py - handler for SCGI protocol
Modified by Michele Comitini <michele.comitini@glisco.it>
from fcgihandler.py to support SCGI
fcgihandler has the following copyright:
" This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"
This is a handler for lighttpd+scgi
This file has to be in the PYTHONPATH
Put something like this in the lighttpd.conf file:
server.document-root="/var/www/web2py/"
# for >= linux-2.6
server.event-handler = "linux-sysepoll"
url.rewrite-once = (
"^(/.+?/static/.+)$" => "/applications$1",
"(^|/.*)$" => "/handler_web2py.scgi$1",
)
scgi.server = ( "/handler_web2py.scgi" =>
("handler_web2py" =>
( "host" => "127.0.0.1",
"port" => "4000",
"check-local" => "disable", # don't forget to set "disable"!
)
)
)
"""
LOGGING = False    # wrap the app in web2py's logging/profiling factory
SOFTCRON = False   # run web2py cron inside the request cycle
import sys
import os
# make the web2py folder the cwd and the first entry on sys.path
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
# uncomment one of the two imports below depending on the SCGIWSGI server installed
#import paste.util.scgiserver as scgi
from wsgitools.scgi.forkpool import SCGIServer
if LOGGING:
    application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                        logfilename='httpserver.log',
                                        profilerfilename=None)
else:
    application = gluon.main.wsgibase
if SOFTCRON:
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'
# uncomment one of the two rows below depending on the SCGIWSGI server installed
#scgi.serve_application(application, '', 4000).run()
SCGIServer(application, port=4000).run()
# | Python  (embedded-file boundary from the dataset dump)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# when web2py is run as a windows service (web2py.exe -W)
# it does not load the command line options but it
# expects to find conifguration settings in a file called
#
# web2py/options.py
#
# this file is an example for options.py
import socket
import os
# Bind address and port for the built-in HTTP server.
ip = '0.0.0.0'
port = 80
# One tuple per listening interface; the 4-element form adds SSL key/cert.
interfaces=[('0.0.0.0',80),('0.0.0.0',443,'ssl_private_key.pem','ssl_certificate.pem')]
password = '<recycle>' # ## <recycle> means use the previous password
pid_filename = 'httpserver.pid'
log_filename = 'httpserver.log'
profiler_filename = None
#ssl_certificate = 'ssl_certificate.pem' # ## path to certificate file
#ssl_private_key = 'ssl_private_key.pem' # ## path to private key file
#numthreads = 50 # ## deprecated; remove
minthreads = None
maxthreads = None
server_name = socket.gethostname()
request_queue_size = 5
timeout = 30
shutdown_timeout = 5
folder = os.getcwd()
extcron = None
nocron = None
# | Python  (embedded-file boundary from the dataset dump)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file is based, althought a rewrite, on MIT code from the Bottle web framework.
"""
import os, sys, optparse
path = os.path.dirname(os.path.abspath(__file__))
os.chdir(path)
sys.path = [path]+[p for p in sys.path if not p==path]
import gluon.main
from gluon.fileutils import read_file, write_file
class Servers:
    """Registry of WSGI server adapters, one static method per backend.

    Each adapter receives the WSGI ``app`` and an ``(ip, port)`` address
    tuple, imports its backend lazily, and blocks serving requests.
    ``run()`` dispatches on the method name.
    """
    @staticmethod
    def cgi(app, address=None, **options):
        from wsgiref.handlers import CGIHandler
        CGIHandler().run(app) # Just ignore host and port here
    @staticmethod
    def flup(app,address, **options):
        import flup.server.fcgi
        flup.server.fcgi.WSGIServer(app, bindAddress=address).run()
    @staticmethod
    def wsgiref(app,address,**options): # pragma: no cover
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        class QuietHandler(WSGIRequestHandler):
            # silence the default per-request stderr logging
            def log_request(*args, **kw): pass
        options['handler_class'] = QuietHandler
        srv = make_server(address[0],address[1],app,**options)
        srv.serve_forever()
    @staticmethod
    def cherrypy(app,address, **options):
        from cherrypy import wsgiserver
        server = wsgiserver.CherryPyWSGIServer(address, app)
        server.start()
    @staticmethod
    def rocket(app,address, **options):
        from gluon.rocket import CherryPyWSGIServer
        server = CherryPyWSGIServer(address, app)
        server.start()
    @staticmethod
    def rocket_with_repoze_profiler(app,address, **options):
        from gluon.rocket import CherryPyWSGIServer
        from repoze.profile.profiler import AccumulatingProfileMiddleware
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'none'
        wrapped = AccumulatingProfileMiddleware(
            app,
            log_filename='wsgi.prof',
            discard_first_request=True,
            flush_at_shutdown=True,
            path = '/__profile__'
            )
        server = CherryPyWSGIServer(address, wrapped)
        server.start()
    @staticmethod
    def paste(app,address,**options):
        # (dropped an unused TransLogger import that was never applied)
        from paste import httpserver
        httpserver.serve(app, host=address[0], port=address[1], **options)
    @staticmethod
    def fapws(app,address, **options):
        import fapws._evwsgi as evwsgi
        from fapws import base
        evwsgi.start(address[0],str(address[1]))
        evwsgi.set_base_module(base)
        # bug fix: the wrapper used to be named `app`, shadowing the real
        # application and recursing into itself on every request
        def wrapped_app(environ, start_response):
            environ['wsgi.multiprocess'] = False
            return app(environ, start_response)
        evwsgi.wsgi_cb(('',wrapped_app))
        evwsgi.run()
    @staticmethod
    def gevent(app,address, **options):
        from gevent import monkey; monkey.patch_all()
        from gevent import pywsgi
        from gevent.pool import Pool
        # bug fix: `option.workers` was a NameError; options is a kwargs dict
        spawn = 'workers' in options and Pool(int(options['workers'])) or 'default'
        pywsgi.WSGIServer(address, app, spawn = spawn).serve_forever()
    @staticmethod
    def bjoern(app,address, **options):
        import bjoern
        bjoern.run(app, *address)
    @staticmethod
    def tornado(app,address, **options):
        import tornado.wsgi
        import tornado.httpserver
        import tornado.ioloop
        container = tornado.wsgi.WSGIContainer(app)
        server = tornado.httpserver.HTTPServer(container)
        server.listen(address=address[0], port=address[1])
        tornado.ioloop.IOLoop.instance().start()
    @staticmethod
    def twisted(app,address, **options):
        from twisted.web import server, wsgi
        from twisted.python.threadpool import ThreadPool
        from twisted.internet import reactor
        thread_pool = ThreadPool()
        thread_pool.start()
        # make sure worker threads die with the reactor
        reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
        factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app))
        reactor.listenTCP(address[1], factory, interface=address[0])
        reactor.run()
    @staticmethod
    def diesel(app,address, **options):
        from diesel.protocols.wsgi import WSGIApplication
        app = WSGIApplication(app, port=address[1])
        app.run()
    @staticmethod
    def gnuicorn(app,address, **options):
        # (method name kept as "gnuicorn" for -s CLI compatibility)
        import gunicorn.arbiter
        gunicorn.arbiter.Arbiter(address, 4, app).run()
    @staticmethod
    def eventlet(app,address, **options):
        from eventlet import wsgi, listen
        wsgi.server(listen(address), app)
def run(servername,ip,port,softcron=True,logging=False,profiler=None):
    """Build the web2py WSGI application and serve it with the named backend.

    servername must match one of the Servers static methods; this call
    blocks until the chosen server exits.
    """
    if logging:
        application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase,
                                            logfilename='httpserver.log',
                                            profilerfilename=profiler)
    else:
        application = gluon.main.wsgibase
    if softcron:
        from gluon.settings import global_settings
        global_settings.web2py_crontype = 'soft'
    # dispatch to the adapter by name, e.g. Servers.tornado(app, (ip, port))
    getattr(Servers,servername)(application,(ip,int(port)))
def main():
    """Parse command-line options and start the requested server."""
    usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P"
    try:
        version = read_file('VERSION')
    except IOError:
        version = ''
    parser = optparse.OptionParser(usage, None, optparse.Option, version)
    parser.add_option('-l',
                      '--logging',
                      action='store_true',
                      default=False,
                      dest='logging',
                      help='log into httpserver.log')
    parser.add_option('-P',
                      '--profiler',
                      default=False,
                      dest='profiler',
                      help='profiler filename')
    servers = ', '.join(x for x in dir(Servers) if not x[0]=='_')
    parser.add_option('-s',
                      '--server',
                      default='rocket',
                      dest='server',
                      help='server name (%s)' % servers)
    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help='ip address')
    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      help='port number')
    parser.add_option('-w',
                      '--workers',
                      default='',
                      dest='workers',
                      help='number of workers number')
    (options, args) = parser.parse_args()
    print 'starting %s on %s:%s...' % (options.server,options.ip,options.port)
    # NOTE(review): options.workers is parsed but never forwarded to run(),
    # so the gevent worker-pool option is currently unreachable — confirm.
    run(options.server,options.ip,options.port,logging=options.logging,profiler=options.profiler)
if __name__=='__main__':
    main()
# | Python  (embedded-file boundary from the dataset dump)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Usage:
Install py2exe: http://sourceforge.net/projects/py2exe/files/
Copy script to the web2py directory
c:\bin\python26\python build_windows_exe.py py2exe
Adapted from http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/view/head:/static/scripts/tools/standalone_exe.py
"""
from distutils.core import setup
import py2exe
from gluon.import_all import base_modules, contributed_modules
from gluon.fileutils import readlines_file
from glob import glob
import fnmatch
import os
import shutil
import sys
import re
import zipfile
#read web2py version from VERSION file
web2py_version_line = readlines_file('VERSION')[0]
#use regular expression to get just the version number
v_re = re.compile('[0-9]+\.[0-9]+\.[0-9]+')
web2py_version = v_re.search(web2py_version_line).group(0)
#pull in preferences from config file
import ConfigParser
Config = ConfigParser.ConfigParser()
Config.read('setup_exe.conf')
remove_msft_dlls = Config.getboolean("Setup", "remove_microsoft_dlls")
copy_apps = Config.getboolean("Setup", "copy_apps")
copy_site_packages = Config.getboolean("Setup", "copy_site_packages")
copy_scripts = Config.getboolean("Setup", "copy_scripts")
make_zip = Config.getboolean("Setup", "make_zip")
zip_filename = Config.get("Setup", "zip_filename")
remove_build_files = Config.getboolean("Setup", "remove_build_files")
# Python base version
python_version = sys.version[:3]
# List of modules deprecated in python2.6 that are in the above set
py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter']
if python_version == '2.6':
    # json/multiprocessing entered the stdlib in 2.6; drop deprecated ones
    base_modules += ['json', 'multiprocessing']
    base_modules = list(set(base_modules).difference(set(py26_deprecated)))
#I don't know if this is even necessary
if python_version == '2.6':
    # Python26 compatibility: http://www.py2exe.org/index.cgi/Tutorial#Step52
    try:
        shutil.copytree('C:\Bin\Microsoft.VC90.CRT', 'dist/')
    except:
        print "You MUST copy Microsoft.VC90.CRT folder into the dist directory"
# Build both a console and a windowless executable from web2py.py.
setup(
    console=['web2py.py'],
    windows=[{'script':'web2py.py',
              'dest_base':'web2py_no_console' # MUST NOT be just 'web2py' otherwise it overrides the standard web2py.exe
              }],
    name="web2py",
    version=web2py_version,
    description="web2py web framework",
    author="Massimo DiPierro",
    license = "LGPL v3",
    data_files=[
        'ABOUT',
        'LICENSE',
        'VERSION',
        'splashlogo.gif',
        'logging.example.conf',
        'options_std.py',
        'app.example.yaml',
        'queue.example.yaml'
        ],
    options={'py2exe': {
        'packages': contributed_modules,
        'includes': base_modules,
        }},
    )
print "web2py binary successfully built"
def copy_folders(source, destination):
    """Copy files & folders from source to destination (within dist/)"""
    target = os.path.join('dist', destination)
    # replace any previous copy wholesale so stale files never linger
    if os.path.exists(target):
        shutil.rmtree(target)
    shutil.copytree(source, target)
#should we remove Windows OS dlls user is unlikely to be able to distribute
if remove_msft_dlls:
    print "Deleted Microsoft files not licensed for open source distribution"
    print "You are still responsible for making sure you have the rights to distribute any other included files!"
    #delete the API-MS-Win-Core DLLs
    for f in glob ('dist/API-MS-Win-*.dll'):
        os.unlink (f)
    #then delete some other files belonging to Microsoft
    other_ms_files = ['KERNELBASE.dll', 'MPR.dll', 'MSWSOCK.dll', 'POWRPROF.dll']
    for f in other_ms_files:
        try:
            os.unlink(os.path.join('dist',f))
        except:
            # abort the build rather than ship files we meant to strip
            print "unable to delete dist/"+f
            sys.exit(1)
#Should we include applications?
if copy_apps:
    copy_folders('applications', 'applications')
    print "Your application(s) have been added"
else:
    #only copy web2py's default applications
    copy_folders('applications/admin', 'applications/admin')
    copy_folders('applications/welcome', 'applications/welcome')
    copy_folders('applications/examples', 'applications/examples')
    print "Only web2py's admin, examples & welcome applications have been added"
#should we copy project's site-packages into dist/site-packages
if copy_site_packages:
    #copy site-packages
    copy_folders('site-packages', 'site-packages')
else:
    #no worries, web2py will create the (empty) folder first run
    print "Skipping site-packages"
    pass
#should we copy project's scripts into dist/scripts
if copy_scripts:
    #copy scripts
    copy_folders('scripts', 'scripts')
else:
    #no worries, web2py will create the (empty) folder first run
    print "Skipping scripts"
    pass
#borrowed from http://bytes.com/topic/python/answers/851018-how-zip-directory-python-using-zipfile
def recursive_zip(zipf, directory, folder = ""):
    """Recursively add directory's contents to zipf, prefixed by folder."""
    for entry in os.listdir(directory):
        full_path = os.path.join(directory, entry)
        arc_name = folder + os.sep + entry
        if os.path.isfile(full_path):
            zipf.write(full_path, arc_name)
        elif os.path.isdir(full_path):
            recursive_zip(zipf, full_path, arc_name)
#should we create a zip file of the build?
if make_zip:
    #to keep consistent with how official web2py windows zip file is setup,
    #create a web2py folder & copy dist's files into it
    shutil.copytree('dist','zip_temp/web2py')
    #create zip file
    #use filename specified via command line
    zipf = zipfile.ZipFile(zip_filename+".zip", "w", compression=zipfile.ZIP_DEFLATED )
    path = 'zip_temp' #just temp so the web2py directory is included in our zip file
    recursive_zip(zipf, path) #leave the first folder as None, as path is root.
    zipf.close()
    shutil.rmtree('zip_temp')
    print "Your Windows binary version of web2py can be found in "+zip_filename+".zip"
    print "You may extract the archive anywhere and then run web2py/web2py.exe"
#should py2exe build files be removed?
if remove_build_files:
    shutil.rmtree('build')
    shutil.rmtree('deposit')
    shutil.rmtree('dist')
    print "py2exe build files removed"
#final info
if not make_zip and not remove_build_files:
    print "Your Windows binary & associated files can also be found in /dist"
print "Finished!"
print "Enjoy web2py " +web2py_version_line
# | Python  (embedded-file boundary from the dataset dump)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The widget is called from web2py.
"""
import sys
import cStringIO
import time
import thread
import re
import os
import socket
import signal
import math
import logging
import newcron
import main
from fileutils import w2p_pack, read_file, write_file
from shell import run, test
from settings import global_settings
# GUI/taskbar/Windows-service support is optional; fall back silently when
# Tkinter or the Windows-only helpers are unavailable (e.g. headless boxes).
try:
    import Tkinter, tkMessageBox
    import contrib.taskbar_widget
    from winservice import web2py_windows_service_handler
except:
    pass
# Python < 2.5 compatibility shim: make BaseException always available.
try:
    BaseException
except NameError:
    BaseException = Exception
ProgramName = 'web2py Web Framework'
ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-2011'
ProgramVersion = read_file('VERSION').strip()
ProgramInfo = '''%s
                 %s
                 %s''' % (ProgramName, ProgramAuthor, ProgramVersion)
# warn (but do not abort) when running on an unsupported interpreter
if not sys.version[:3] in ['2.4', '2.5', '2.6', '2.7']:
    msg = 'Warning: web2py requires Python 2.4, 2.5 (recommended), 2.6 or 2.7 but you are running:\n%s'
    msg = msg % sys.version
    sys.stderr.write(msg)
logger = logging.getLogger("web2py")
class IO(object):
    """Captures console output, always echoing it to the real stdout.

    Data goes to `self.callback` when one has been attached, otherwise it
    accumulates in `self.buffer`.
    """
    def __init__(self):
        """Start with an empty capture buffer and no callback."""
        self.buffer = cStringIO.StringIO()
    def write(self, data):
        """Echo data to the real stdout, then forward or buffer it."""
        sys.__stdout__.write(data)
        if not hasattr(self, 'callback'):
            self.buffer.write(data)
            return
        self.callback(data)
def try_start_browser(url):
    """ Try to start the default browser """
    # best-effort: a missing/undetectable browser must never abort startup
    try:
        import webbrowser
        webbrowser.open(url)
    except:
        print 'warning: unable to detect your browser'
def start_browser(ip, port):
    """ Starts the default browser """
    # print the URL first so the user can copy it if the browser fails
    print 'please visit:'
    print '\thttp://%s:%s' % (ip, port)
    print 'starting browser...'
    try_start_browser('http://%s:%s' % (ip, port))
def presentation(root):
    """ Draw the splash screen """
    root.withdraw()
    # center a borderless 500x300 toplevel on the screen
    dx = root.winfo_screenwidth()
    dy = root.winfo_screenheight()
    dialog = Tkinter.Toplevel(root, bg='white')
    dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150))
    dialog.overrideredirect(1)
    dialog.focus_force()
    canvas = Tkinter.Canvas(dialog,
                            background='white',
                            width=500,
                            height=300)
    canvas.pack()
    root.update()
    img = Tkinter.PhotoImage(file='splashlogo.gif')
    pnl = Tkinter.Label(canvas, image=img, background='white', bd=0)
    pnl.pack(side='top', fill='both', expand='yes')
    # Prevent garbage collection of img
    pnl.image=img
    def add_label(text='Change Me', font_size=12, foreground='#195866', height=1):
        # helper producing uniformly styled splash-screen labels
        return Tkinter.Label(
            master=canvas,
            width=250,
            height=height,
            text=text,
            font=('Helvetica', font_size),
            anchor=Tkinter.CENTER,
            foreground=foreground,
            background='white'
            )
    add_label('Welcome to...').pack(side='top')
    add_label(ProgramName, 18, '#FF5C1F', 2).pack()
    add_label(ProgramAuthor).pack()
    add_label(ProgramVersion).pack()
    root.update()
    # keep the splash visible for five seconds, then tear it down
    time.sleep(5)
    dialog.destroy()
    return
class web2pyDialog(object):
    """ Main window dialog """
    def __init__(self, root, options):
        """ web2pyDialog constructor """
        # root: Tk root window; options: parsed command-line options object
        root.title('web2py server')
        self.root = Tkinter.Toplevel(root)
        self.options = options
        self.menu = Tkinter.Menu(self.root)
        servermenu = Tkinter.Menu(self.menu, tearoff=0)
        httplog = os.path.join(self.options.folder, 'httpserver.log')
        # Building the Menu
        item = lambda: try_start_browser(httplog)
        servermenu.add_command(label='View httpserver.log',
                               command=item)
        servermenu.add_command(label='Quit (pid:%i)' % os.getpid(),
                               command=self.quit)
        self.menu.add_cascade(label='Server', menu=servermenu)
        # the Pages menu is filled lazily by connect_pages() at start()
        self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0)
        self.menu.add_cascade(label='Pages', menu=self.pagesmenu)
        helpmenu = Tkinter.Menu(self.menu, tearoff=0)
        # Home Page
        item = lambda: try_start_browser('http://www.web2py.com')
        helpmenu.add_command(label='Home Page',
                             command=item)
        # About
        item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo)
        helpmenu.add_command(label='About',
                             command=item)
        self.menu.add_cascade(label='Info', menu=helpmenu)
        self.root.config(menu=self.menu)
        # in taskbar mode, closing the window only hides it
        if options.taskbar:
            self.root.protocol('WM_DELETE_WINDOW',
                               lambda: self.quit(True))
        else:
            self.root.protocol('WM_DELETE_WINDOW', self.quit)
        sticky = Tkinter.NW
        # IP
        Tkinter.Label(self.root,
                      text='Server IP:',
                      justify=Tkinter.LEFT).grid(row=0,
                                                 column=0,
                                                 sticky=sticky)
        self.ip = Tkinter.Entry(self.root)
        self.ip.insert(Tkinter.END, self.options.ip)
        self.ip.grid(row=0, column=1, sticky=sticky)
        # Port
        Tkinter.Label(self.root,
                      text='Server Port:',
                      justify=Tkinter.LEFT).grid(row=1,
                                                 column=0,
                                                 sticky=sticky)
        self.port_number = Tkinter.Entry(self.root)
        self.port_number.insert(Tkinter.END, self.options.port)
        self.port_number.grid(row=1, column=1, sticky=sticky)
        # Password
        Tkinter.Label(self.root,
                      text='Choose Password:',
                      justify=Tkinter.LEFT).grid(row=2,
                                                 column=0,
                                                 sticky=sticky)
        self.password = Tkinter.Entry(self.root, show='*')
        self.password.bind('<Return>', lambda e: self.start())
        self.password.focus_force()
        self.password.grid(row=2, column=1, sticky=sticky)
        # Prepare the canvas (activity graph, repainted by update_canvas)
        self.canvas = Tkinter.Canvas(self.root,
                                     width=300,
                                     height=100,
                                     bg='black')
        self.canvas.grid(row=3, column=0, columnspan=2)
        self.canvas.after(1000, self.update_canvas)
        # Prepare the frame
        frame = Tkinter.Frame(self.root)
        frame.grid(row=4, column=0, columnspan=2)
        # Start button
        self.button_start = Tkinter.Button(frame,
                                           text='start server',
                                           command=self.start)
        self.button_start.grid(row=0, column=0)
        # Stop button
        self.button_stop = Tkinter.Button(frame,
                                          text='stop server',
                                          command=self.stop)
        self.button_stop.grid(row=0, column=1)
        self.button_stop.configure(state='disabled')
        if options.taskbar:
            self.tb = contrib.taskbar_widget.TaskBarIcon()
            self.checkTaskBar()
            # with a known password the server starts immediately, hidden
            if options.password != '<ask>':
                self.password.insert(0, options.password)
                self.start()
                self.root.withdraw()
        else:
            self.tb = None
    def checkTaskBar(self):
        """ Check taskbar status """
        # polls the taskbar widget's status queue once per second
        if self.tb.status:
            if self.tb.status[0] == self.tb.EnumStatus.QUIT:
                self.quit()
            elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE:
                if self.root.state() == 'withdrawn':
                    self.root.deiconify()
                else:
                    self.root.withdraw()
            elif self.tb.status[0] == self.tb.EnumStatus.STOP:
                self.stop()
            elif self.tb.status[0] == self.tb.EnumStatus.START:
                self.start()
            elif self.tb.status[0] == self.tb.EnumStatus.RESTART:
                self.stop()
                self.start()
            del self.tb.status[0]
        self.root.after(1000, self.checkTaskBar)
    def update(self, text):
        """ Update app text """
        try:
            self.text.configure(state='normal')
            self.text.insert('end', text)
            self.text.configure(state='disabled')
        except:
            pass # ## this should only happen in case app is destroyed
    def connect_pages(self):
        """ Connect pages """
        # add one Pages-menu entry per installed application
        for arq in os.listdir('applications/'):
            if os.path.exists('applications/%s/__init__.py' % arq):
                url = self.url + '/' + arq
                start_browser = lambda u = url: try_start_browser(u)
                self.pagesmenu.add_command(label=url,
                                           command=start_browser)
    def quit(self, justHide=False):
        """ Finish the program execution """
        if justHide:
            self.root.withdraw()
        else:
            try:
                self.server.stop()
            except:
                pass
            try:
                self.tb.Destroy()
            except:
                pass
            self.root.destroy()
            sys.exit()
    def error(self, message):
        """ Show error message """
        tkMessageBox.showerror('web2py start server', message)
    def start(self):
        """ Start web2py server """
        password = self.password.get()
        if not password:
            self.error('no password, no web admin interface')
        ip = self.ip.get()
        regexp = '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
        if ip and not re.compile(regexp).match(ip):
            return self.error('invalid host ip address')
        try:
            port = int(self.port_number.get())
        except:
            return self.error('invalid port number')
        self.url = 'http://%s:%s' % (ip, port)
        self.connect_pages()
        self.button_start.configure(state='disabled')
        try:
            options = self.options
            req_queue_size = options.request_queue_size
            self.server = main.HttpServer(
                ip,
                port,
                password,
                pid_filename=options.pid_filename,
                log_filename=options.log_filename,
                profiler_filename=options.profiler_filename,
                ssl_certificate=options.ssl_certificate,
                ssl_private_key=options.ssl_private_key,
                min_threads=options.minthreads,
                max_threads=options.maxthreads,
                server_name=options.server_name,
                request_queue_size=req_queue_size,
                timeout=options.timeout,
                shutdown_timeout=options.shutdown_timeout,
                path=options.folder,
                interfaces=options.interfaces)
            # serve in a background thread so the GUI stays responsive
            thread.start_new_thread(self.server.start, ())
        except Exception, e:
            self.button_start.configure(state='normal')
            return self.error(str(e))
        self.button_stop.configure(state='normal')
        if not options.taskbar:
            thread.start_new_thread(start_browser, (ip, port))
        # freeze the connection settings while the server is running
        self.password.configure(state='readonly')
        self.ip.configure(state='readonly')
        self.port_number.configure(state='readonly')
        if self.tb:
            self.tb.SetServerRunning()
    def stop(self):
        """ Stop web2py server """
        self.button_start.configure(state='normal')
        self.button_stop.configure(state='disabled')
        self.password.configure(state='normal')
        self.ip.configure(state='normal')
        self.port_number.configure(state='normal')
        self.server.stop()
        if self.tb:
            self.tb.SetServerStopped()
    def update_canvas(self):
        """ Update canvas """
        # graphs request activity by measuring httpserver.log growth;
        # state (t0/p0/q0) is created lazily in the except branch below
        try:
            t1 = os.path.getsize('httpserver.log')
        except:
            self.canvas.after(1000, self.update_canvas)
            return
        try:
            fp = open('httpserver.log', 'r')
            fp.seek(self.t0)
            data = fp.read(t1 - self.t0)
            fp.close()
            value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))]
            self.p0 = value
            for i in xrange(len(self.p0) - 1):
                c = self.canvas.coords(self.q0[i])
                self.canvas.coords(self.q0[i],
                                   (c[0],
                                    self.p0[i],
                                    c[2],
                                    self.p0[i + 1]))
            self.t0 = t1
        except BaseException:
            # first call: self.t0 does not exist yet, so the read above
            # raises and we initialize the plot state here instead
            self.t0 = time.time()
            self.t0 = t1
            self.p0 = [100] * 300
            self.q0 = [self.canvas.create_line(i, 100, i + 1, 100,
                       fill='green') for i in xrange(len(self.p0) - 1)]
        self.canvas.after(1000, self.update_canvas)
def console():
    """ Defines the behavior of the console web2py execution

    Builds the optparse command-line interface, parses sys.argv (minus
    anything after -A/--args, which is forwarded to -S/-R scripts),
    normalizes the resulting options and, unless running as a cron job,
    bootstraps the applications/ folder.  Returns (options, args).
    """
    import optparse
    import textwrap
    usage = "python web2py.py"
    description = """\
    web2py Web Framework startup script.
    ATTENTION: unless a password is specified (-a 'passwd') web2py will
    attempt to run a GUI. In this case command line options are ignored."""
    description = textwrap.dedent(description)
    parser = optparse.OptionParser(usage, None, optparse.Option, ProgramVersion)
    parser.description = description
    # -- network / server identity options --
    parser.add_option('-i',
                      '--ip',
                      default='127.0.0.1',
                      dest='ip',
                      help='ip address of the server (127.0.0.1)')
    parser.add_option('-p',
                      '--port',
                      default='8000',
                      dest='port',
                      type='int',
                      help='port of server (8000)')
    msg = 'password to be used for administration'
    msg += ' (use -a "<recycle>" to reuse the last password))'
    parser.add_option('-a',
                      '--password',
                      default='<ask>',
                      dest='password',
                      help=msg)
    parser.add_option('-c',
                      '--ssl_certificate',
                      default='',
                      dest='ssl_certificate',
                      help='file that contains ssl certificate')
    parser.add_option('-k',
                      '--ssl_private_key',
                      default='',
                      dest='ssl_private_key',
                      help='file that contains ssl private key')
    parser.add_option('-d',
                      '--pid_filename',
                      default='httpserver.pid',
                      dest='pid_filename',
                      help='file to store the pid of the server')
    parser.add_option('-l',
                      '--log_filename',
                      default='httpserver.log',
                      dest='log_filename',
                      help='file to log connections')
    # -- thread-pool tuning options --
    parser.add_option('-n',
                      '--numthreads',
                      default=None,
                      type='int',
                      dest='numthreads',
                      help='number of threads (deprecated)')
    parser.add_option('--minthreads',
                      default=None,
                      type='int',
                      dest='minthreads',
                      help='minimum number of server threads')
    parser.add_option('--maxthreads',
                      default=None,
                      type='int',
                      dest='maxthreads',
                      help='maximum number of server threads')
    parser.add_option('-s',
                      '--server_name',
                      default=socket.gethostname(),
                      dest='server_name',
                      help='server name for the web server')
    msg = 'max number of queued requests when server unavailable'
    parser.add_option('-q',
                      '--request_queue_size',
                      default='5',
                      type='int',
                      dest='request_queue_size',
                      help=msg)
    parser.add_option('-o',
                      '--timeout',
                      default='10',
                      type='int',
                      dest='timeout',
                      help='timeout for individual request (10 seconds)')
    parser.add_option('-z',
                      '--shutdown_timeout',
                      default='5',
                      type='int',
                      dest='shutdown_timeout',
                      help='timeout on shutdown of server (5 seconds)')
    parser.add_option('-f',
                      '--folder',
                      default=os.getcwd(),
                      dest='folder',
                      help='folder from which to run web2py')
    # -- verbosity / logging options --
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      dest='verbose',
                      default=False,
                      help='increase --test verbosity')
    parser.add_option('-Q',
                      '--quiet',
                      action='store_true',
                      dest='quiet',
                      default=False,
                      help='disable all output')
    msg = 'set debug output level (0-100, 0 means all, 100 means none;'
    msg += ' default is 30)'
    parser.add_option('-D',
                      '--debug',
                      dest='debuglevel',
                      default=30,
                      type='int',
                      help=msg)
    # -- interactive shell / test-run options --
    msg = 'run web2py in interactive shell or IPython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += ' APPNAME like a/c/f (c,f optional)'
    parser.add_option('-S',
                      '--shell',
                      dest='shell',
                      metavar='APPNAME',
                      help=msg)
    msg = 'run web2py in interactive shell or bpython (if installed) with'
    msg += ' specified appname (if app does not exist it will be created).'
    msg += '\n Use combined with --shell'
    parser.add_option('-B',
                      '--bpython',
                      action='store_true',
                      default=False,
                      dest='bpython',
                      help=msg)
    msg = 'only use plain python shell; should be used with --shell option'
    parser.add_option('-P',
                      '--plain',
                      action='store_true',
                      default=False,
                      dest='plain',
                      help=msg)
    msg = 'auto import model files; default is False; should be used'
    msg += ' with --shell option'
    parser.add_option('-M',
                      '--import_models',
                      action='store_true',
                      default=False,
                      dest='import_models',
                      help=msg)
    msg = 'run PYTHON_FILE in web2py environment;'
    msg += ' should be used with --shell option'
    parser.add_option('-R',
                      '--run',
                      dest='run',
                      metavar='PYTHON_FILE',
                      default='',
                      help=msg)
    msg = 'run doctests in web2py environment; ' +\
        'TEST_PATH like a/c/f (c,f optional)'
    parser.add_option('-T',
                      '--test',
                      dest='test',
                      metavar='TEST_PATH',
                      default=None,
                      help=msg)
    # -- Windows service / cron options --
    parser.add_option('-W',
                      '--winservice',
                      dest='winservice',
                      default='',
                      help='-W install|start|stop as Windows service')
    msg = 'trigger a cron run manually; usually invoked from a system crontab'
    parser.add_option('-C',
                      '--cron',
                      action='store_true',
                      dest='extcron',
                      default=False,
                      help=msg)
    msg = 'triggers the use of softcron'
    parser.add_option('--softcron',
                      action='store_true',
                      dest='softcron',
                      default=False,
                      help=msg)
    parser.add_option('-N',
                      '--no-cron',
                      action='store_true',
                      dest='nocron',
                      default=False,
                      help='do not start cron automatically')
    parser.add_option('-J',
                      '--cronjob',
                      action='store_true',
                      dest='cronjob',
                      default=False,
                      help='identify cron-initiated command')
    parser.add_option('-L',
                      '--config',
                      dest='config',
                      default='',
                      help='config file')
    parser.add_option('-F',
                      '--profiler',
                      dest='profiler_filename',
                      default=None,
                      help='profiler filename')
    # -- GUI / miscellaneous options --
    parser.add_option('-t',
                      '--taskbar',
                      action='store_true',
                      dest='taskbar',
                      default=False,
                      help='use web2py gui and run in taskbar (system tray)')
    parser.add_option('',
                      '--nogui',
                      action='store_true',
                      default=False,
                      dest='nogui',
                      help='text-only, no GUI')
    parser.add_option('-A',
                      '--args',
                      action='store',
                      dest='args',
                      default=None,
                      help='should be followed by a list of arguments to be passed to script, to be used with -S, -A must be the last option')
    parser.add_option('--no-banner',
                      action='store_true',
                      default=False,
                      dest='nobanner',
                      help='Do not print header banner')
    msg = 'listen on multiple addresses: "ip:port:cert:key;ip2:port2:cert2:key2;..." (:cert:key optional; no spaces)'
    parser.add_option('--interfaces',
                      action='store',
                      dest='interfaces',
                      default=None,
                      help=msg)
    # everything after -A/--args belongs to the -S/-R script, not to web2py:
    # strip it from sys.argv before optparse sees it
    if '-A' in sys.argv: k = sys.argv.index('-A')
    elif '--args' in sys.argv: k = sys.argv.index('--args')
    else: k=len(sys.argv)
    sys.argv, other_args = sys.argv[:k], sys.argv[k+1:]
    (options, args) = parser.parse_args()
    options.args = [options.run] + other_args
    global_settings.cmd_options = options
    global_settings.cmd_args = args
    if options.quiet:
        # swallow stdout and raise the logger threshold above CRITICAL
        capture = cStringIO.StringIO()
        sys.stdout = capture
        logger.setLevel(logging.CRITICAL + 1)
    else:
        logger.setLevel(options.debuglevel)
    # -L accepts either 'config' or 'config.py'; __import__ wants no extension
    if options.config[-3:] == '.py':
        options.config = options.config[:-3]
    if options.cronjob:
        global_settings.cronjob = True  # tell the world
        options.nocron = True   # don't start cron jobs
        options.plain = True    # cronjobs use a plain shell
    options.folder = os.path.abspath(options.folder)
    # accept --interfaces in the form "ip:port:cert:key;ip2:port2;ip3:port3:cert3:key3"
    # (no spaces; optional cert:key indicate SSL)
    #
    if isinstance(options.interfaces, str):
        options.interfaces = [interface.split(':') for interface in options.interfaces.split(';')]
        for interface in options.interfaces:
            interface[1] = int(interface[1])    # numeric port
        options.interfaces = [tuple(interface) for interface in options.interfaces]
    if options.numthreads is not None and options.minthreads is None:
        options.minthreads = options.numthreads # legacy
    if not options.cronjob:
        # If we have the applications package or if we should upgrade
        if not os.path.exists('applications/__init__.py'):
            write_file('applications/__init__.py', '')
        if not os.path.exists('welcome.w2p') or os.path.exists('NEWINSTALL'):
            try:
                w2p_pack('welcome.w2p','applications/welcome')
                os.unlink('NEWINSTALL')
            except:
                # best-effort: a missing welcome.w2p is not fatal
                msg = "New installation: unable to create welcome.w2p file"
                sys.stderr.write(msg)
    return (options, args)
def start(cron=True):
    """ Start server

    Entry point: parses the command line via console(), then dispatches on
    the options -- doctests (-T), interactive shell (-S), external cron
    (-C), Windows service (-W), Tk GUI (default when no password is given)
    or, finally, the HTTP server itself.
    """
    # ## get command line arguments
    (options, args) = console()
    if not options.nobanner:
        print ProgramName
        print ProgramAuthor
        print ProgramVersion
    from dal import drivers
    if not options.nobanner:
        print 'Database drivers available: %s' % ', '.join(drivers)
    # ## if -L load options from options.config file
    if options.config:
        try:
            options2 = __import__(options.config, {}, {}, '')
        except Exception:
            try:
                # Jython doesn't like the extra stuff
                options2 = __import__(options.config)
            except Exception:
                print 'Cannot import config file [%s]' % options.config
                sys.exit(1)
        # copy every attribute the config module shares with the options
        for key in dir(options2):
            if hasattr(options,key):
                setattr(options,key,getattr(options2,key))
    # ## if -T run doctests (no cron)
    if hasattr(options,'test') and options.test:
        test(options.test, verbose=options.verbose)
        return
    # ## if -S start interactive shell (also no cron)
    if options.shell:
        if options.args!=None:
            sys.argv[:] = options.args
        run(options.shell, plain=options.plain, bpython=options.bpython,
            import_models=options.import_models, startfile=options.run)
        return
    # ## if -C start cron run (extcron) and exit
    # ## if -N or not cron disable cron in this *process*
    # ## if --softcron use softcron
    # ## use hardcron in all other cases
    if options.extcron:
        print 'Starting extcron...'
        global_settings.web2py_crontype = 'external'
        extcron = newcron.extcron(options.folder)
        extcron.start()
        extcron.join()
        return
    elif cron and not options.nocron and options.softcron:
        print 'Using softcron (but this is not very efficient)'
        global_settings.web2py_crontype = 'soft'
    elif cron and not options.nocron:
        print 'Starting hardcron...'
        global_settings.web2py_crontype = 'hard'
        newcron.hardcron(options.folder).start()
    # ## if -W install/start/stop web2py as service
    if options.winservice:
        if os.name == 'nt':
            web2py_windows_service_handler(['', options.winservice],
                                           options.config)
        else:
            print 'Error: Windows services not supported on this platform'
            sys.exit(1)
        return
    # ## if no password provided and havetk start Tk interface
    # ## or start interface if we want to put in taskbar (system tray)
    try:
        options.taskbar
    except:
        options.taskbar = False
    if options.taskbar and os.name != 'nt':
        print 'Error: taskbar not supported on this platform'
        sys.exit(1)
    root = None
    if not options.nogui:
        try:
            import Tkinter
            havetk = True
        except ImportError:
            logger.warn('GUI not available because Tk library is not installed')
            havetk = False
        if options.password == '<ask>' and havetk or options.taskbar and havetk:
            try:
                root = Tkinter.Tk()
            except:
                # no display available: fall back to the console path below
                pass
    if root:
        # GUI mode: hand control to the Tk main loop; the dialog starts
        # the server itself, so this branch never falls through
        root.focus_force()
        if not options.quiet:
            presentation(root)
        master = web2pyDialog(root, options)
        signal.signal(signal.SIGTERM, lambda a, b: master.quit())
        try:
            root.mainloop()
        except:
            master.quit()
        sys.exit()
    # ## if no tk and no password, ask for a password
    if not root and options.password == '<ask>':
        options.password = raw_input('choose a password:')
    if not options.password and not options.nobanner:
        print 'no password, no admin interface'
    # ## start server
    (ip, port) = (options.ip, int(options.port))
    if not options.nobanner:
        print 'please visit:'
        print '\thttp://%s:%s' % (ip, port)
        print 'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid()
    server = main.HttpServer(ip=ip,
                             port=port,
                             password=options.password,
                             pid_filename=options.pid_filename,
                             log_filename=options.log_filename,
                             profiler_filename=options.profiler_filename,
                             ssl_certificate=options.ssl_certificate,
                             ssl_private_key=options.ssl_private_key,
                             min_threads=options.minthreads,
                             max_threads=options.maxthreads,
                             server_name=options.server_name,
                             request_queue_size=options.request_queue_size,
                             timeout=options.timeout,
                             shutdown_timeout=options.shutdown_timeout,
                             path=options.folder,
                             interfaces=options.interfaces)
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()
    logging.shutdown()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.
"""
Cross-platform (posix/nt) API for flock-style file locking.
Synopsis:
import portalocker
file = open("somefile", "r+")
portalocker.lock(file, portalocker.LOCK_EX)
file.seek(12)
file.write("foo")
file.close()
If you know what you're doing, you may choose to
portalocker.unlock(file)
before closing the file, but why?
Methods:
lock( file, flags )
unlock( file )
Constants:
LOCK_EX
LOCK_SH
LOCK_NB
I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.
Author: Jonathan Feinberg <jdf@pobox.com>
Version: $Id: portalocker.py,v 1.3 2001/05/29 18:47:55 Administrator Exp $
"""
import os
import logging
import platform
logger = logging.getLogger("web2py")
# figure out which locking primitive this platform provides:
# 'posix' (fcntl) or 'windows' (pywin32); None means no locking at all
os_locking = None
try:
    import fcntl
    os_locking = 'posix'
except:
    pass
try:
    import win32con
    import win32file
    import pywintypes
    os_locking = 'windows'
except:
    pass
# define lock()/unlock() and the LOCK_* constants for whichever
# platform backend was detected above
if os_locking == 'windows':
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # the default
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
    # is there any reason not to reuse the following structure?
    __overlapped = pywintypes.OVERLAPPED()
    def lock(file, flags):
        # lock a fixed 0x7fff0000-byte region starting at offset 0
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.LockFileEx(hfile, flags, 0, 0x7fff0000, __overlapped)
    def unlock(file):
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.UnlockFileEx(hfile, 0, 0x7fff0000, __overlapped)
elif os_locking == 'posix':
    LOCK_EX = fcntl.LOCK_EX
    LOCK_SH = fcntl.LOCK_SH
    LOCK_NB = fcntl.LOCK_NB
    def lock(file, flags):
        fcntl.flock(file.fileno(), flags)
    def unlock(file):
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
    # no locking backend available: warn and fall back to no-op stubs
    if platform.system() == 'Windows':
        logger.error('no file locking, you must install the win32 extensions from: http://sourceforge.net/projects/pywin32/files/')
    else:
        logger.debug('no file locking, this will cause problems')
    LOCK_EX = None
    LOCK_SH = None
    LOCK_NB = None
    def lock(file, flags):
        pass
    def unlock(file):
        pass
if __name__ == '__main__':
    # manual smoke test: append a timestamp to log.txt while holding an
    # exclusive lock, and keep the lock until the user presses enter
    from time import time, strftime, localtime
    import sys
    log = open('log.txt', 'a+')
    lock(log, LOCK_EX)
    timestamp = strftime('%m/%d/%Y %H:%M:%S\n', localtime(time()))
    log.write(timestamp)
    print 'Wrote lines. Hit enter to release lock.'
    dummy = sys.stdin.readline()
    log.close()
| Python |
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
from storage import Storage
# single shared configuration object for the running web2py process
global_settings = Storage()
settings = global_settings # legacy compatibility
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
__all__ = ['HTTP', 'redirect']
# HTTP status code -> reason phrase, used by HTTP.to() to build the
# status line and by HTTP.message for human-readable descriptions
defined_status = {
    200: 'OK',
    201: 'CREATED',
    202: 'ACCEPTED',
    203: 'NON-AUTHORITATIVE INFORMATION',
    204: 'NO CONTENT',
    205: 'RESET CONTENT',
    206: 'PARTIAL CONTENT',
    301: 'MOVED PERMANENTLY',
    302: 'FOUND',
    303: 'SEE OTHER',
    304: 'NOT MODIFIED',
    305: 'USE PROXY',
    307: 'TEMPORARY REDIRECT',
    400: 'BAD REQUEST',
    401: 'UNAUTHORIZED',
    403: 'FORBIDDEN',
    404: 'NOT FOUND',
    405: 'METHOD NOT ALLOWED',
    406: 'NOT ACCEPTABLE',
    407: 'PROXY AUTHENTICATION REQUIRED',
    408: 'REQUEST TIMEOUT',
    409: 'CONFLICT',
    410: 'GONE',
    411: 'LENGTH REQUIRED',
    412: 'PRECONDITION FAILED',
    413: 'REQUEST ENTITY TOO LARGE',
    414: 'REQUEST-URI TOO LONG',
    415: 'UNSUPPORTED MEDIA TYPE',
    416: 'REQUESTED RANGE NOT SATISFIABLE',
    417: 'EXPECTATION FAILED',
    500: 'INTERNAL SERVER ERROR',
    501: 'NOT IMPLEMENTED',
    502: 'BAD GATEWAY',
    503: 'SERVICE UNAVAILABLE',
    504: 'GATEWAY TIMEOUT',
    505: 'HTTP VERSION NOT SUPPORTED',
    }
# If web2py is executed with python2.4 we need
# to use Exception instead of BaseException
# (BaseException was introduced in Python 2.5)
try:
    BaseException
except NameError:
    BaseException = Exception
class HTTP(BaseException):
    """Exception carrying a complete HTTP response (status, body, headers).

    Raising HTTP(...) anywhere aborts the current request; the framework
    catches it and serializes it to the client via to().
    """
    def __init__(
        self,
        status,
        body='',
        **headers
        ):
        # status: int code (or any object whose str() yields a status line)
        # body: a string or an iterable of string chunks
        # headers: response headers; a list value emits one line per item
        self.status = status
        self.body = body
        self.headers = headers
    def to(self, responder):
        """Emit this response through a WSGI-style start_response callable
        and return the body iterable for the server to send."""
        if self.status in defined_status:
            status = '%d %s' % (self.status, defined_status[self.status])
        else:
            status = str(self.status) + ' '
        if not 'Content-Type' in self.headers:
            self.headers['Content-Type'] = 'text/html; charset=UTF-8'
        body = self.body
        if status[:1] == '4':
            # 4xx responses: default the body to the status text, and pad
            # short HTML bodies so that IE shows them instead of its own page
            if not body:
                body = status
            if isinstance(body, str):
                if len(body)<512 and self.headers['Content-Type'].startswith('text/html'):
                    body += '<!-- %s //-->' % ('x'*512) ### trick IE
                self.headers['Content-Length'] = len(body)
        headers = []
        for (k, v) in self.headers.items():
            if isinstance(v, list):
                for item in v:
                    headers.append((k, str(item)))
            else:
                headers.append((k, str(v)))
        responder(status, headers)
        # an iterable (non-string) body is handed to the server as-is;
        # anything else is stringified and wrapped in a one-element list
        if hasattr(body, '__iter__') and not isinstance(self.body, str):
            return body
        return [str(body)]
    @property
    def message(self):
        '''
        compose a message describing this exception
        "status defined_status [web2py_error]"
        message elements that are not defined are omitted
        '''
        msg = '%(status)d'
        if self.status in defined_status:
            msg = '%(status)d %(defined_status)s'
        if 'web2py_error' in self.headers:
            msg += ' [%(web2py_error)s]'
        return msg % dict(status=self.status,
                          defined_status=defined_status.get(self.status),
                          web2py_error=self.headers.get('web2py_error'))
    def __str__(self):
        "stringify me"
        return self.message
def redirect(location, how=303):
    """Abort the request by raising an HTTP redirect (303 by default)
    pointing the client at *location*."""
    # percent-encode CR/LF so the Location header cannot be split
    # (HTTP response-splitting defense)
    safe_location = location.replace('\r', '%0D').replace('\n', '%0A')
    body = 'You are being redirected <a href="%s">here</a>' % safe_location
    raise HTTP(how, body, Location=safe_location)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>,
limodou <limodou@gmail.com> and srackham <srackham@gmail.com>.
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import logging
import pdb
import Queue
import sys
# shared framework logger
logger = logging.getLogger("web2py")
class Pipe(Queue.Queue):
    """A Queue dressed up as a file object, used to feed pdb's
    stdin/stdout across threads.  write/flush/read/readline follow the
    file protocol; a queued None marks the end of a complete message."""
    def __init__(self, name, mode='r', *args, **kwargs):
        # mode is accepted for file-API compatibility but is unused
        self.__name = name
        Queue.Queue.__init__(self, *args, **kwargs)
    def write(self, data):
        logger.debug("debug %s writting %s" % (self.__name, data))
        self.put(data)
    def flush(self):
        # mark checkpoint (complete message)
        logger.debug("debug %s flushing..." % self.__name)
        self.put(None)
        # wait until it is processed
        self.join()
        logger.debug("debug %s flush done" % self.__name)
    def read(self, count=None, timeout=None):
        # count is accepted for file-API compatibility but ignored:
        # exactly one queued item is returned per call
        logger.debug("debug %s reading..." % (self.__name, ))
        data = self.get(block=True, timeout=timeout)
        # signal that we are ready
        self.task_done()
        logger.debug("debug %s read %s" % (self.__name, data))
        return data
    def readline(self):
        logger.debug("debug %s readline..." % (self.__name, ))
        return self.read()
# module-level singletons wired into the single shared debugger instance
pipe_in = Pipe('in')
pipe_out = Pipe('out')
# completekey=None disables readline tab-completion on the queue streams
debugger = pdb.Pdb(completekey=None, stdin=pipe_in, stdout=pipe_out,)
def set_trace():
    "breakpoint shortcut (like pdb)"
    logger.info("DEBUG: set_trace!")
    # f_back: start debugging in the caller's frame, not in this helper
    debugger.set_trace(sys._getframe().f_back)
def stop_trace():
    "stop waiting for the debugger (called atexit)"
    # this should prevent communicate() from waiting forever for a command
    # result after the main thread has finished
    logger.info("DEBUG: stop_trace!")
    pipe_out.write("debug finished!")
    pipe_out.write(None)
    #pipe_out.flush()
def communicate(command=None):
    """Send an optional command to the debugger, then block and return its
    complete textual response (every chunk up to the None terminator)."""
    if command is not None:
        logger.info("DEBUG: sending command %s" % command)
        pipe_in.write(command)
        #pipe_in.flush()
    chunks = []
    # pipe_out.read() yields one chunk per call; None ends the message
    for data in iter(pipe_out.read, None):
        chunks.append(data)
    logger.info("DEBUG: result %s" % repr(chunks))
    return ''.join(chunks)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
This file specifically includes utilities for security.
"""
import hashlib
import hmac
import uuid
import random
import time
import os
import logging
# shared framework logger
logger = logging.getLogger("web2py")
def md5_hash(text):
    """Return the hexadecimal MD5 digest of the given text."""
    digest = hashlib.md5(text)
    return digest.hexdigest()
def simple_hash(text, digest_alg = 'md5'):
    """
    Generates hash with the given text using the specified
    digest hashing algorithm

    digest_alg may be the name of a hashlib algorithm (e.g. 'md5') or a
    callable digest constructor (e.g. hashlib.sha1) applied to text.
    Raises RuntimeError when digest_alg is falsy.
    """
    if not digest_alg:
        # call form of raise works on Python 2 and 3
        # (was the py2-only statement `raise RuntimeError, "..."`)
        raise RuntimeError("simple_hash with digest_alg=None")
    elif not isinstance(digest_alg,str):
        # digest_alg is a constructor: it hashes text directly
        h = digest_alg(text)
    else:
        h = hashlib.new(digest_alg)
        h.update(text)
    return h.hexdigest()
def get_digest(value):
    """
    Map an algorithm name (case-insensitive) to its hashlib constructor.
    Non-string values are passed through unchanged (assumed to already be
    a digest constructor).  Unknown names raise ValueError.
    """
    if not isinstance(value, str):
        return value
    constructors = {
        "md5": hashlib.md5,
        "sha1": hashlib.sha1,
        "sha224": hashlib.sha224,
        "sha256": hashlib.sha256,
        "sha384": hashlib.sha384,
        "sha512": hashlib.sha512,
        }
    try:
        return constructors[value.lower()]
    except KeyError:
        raise ValueError("Invalid digest algorithm")
def hmac_hash(value, key, digest_alg='md5', salt=None):
    """Compute the HMAC hex digest of value under key.

    The key may embed its own algorithm as 'alg:secret', which overrides
    digest_alg; a truthy salt is appended to the MAC input.
    """
    if ':' in key:
        digest_alg, key = key.split(':')
    mac = hmac.new(key, value, get_digest(digest_alg))
    if salt:
        mac.update(str(salt))
    return mac.hexdigest()
### compute constant ctokens
def initialize_urandom():
    """
    This function and the web2py_uuid follow from the following discussion:
    http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09
    At startup web2py compute a unique ID that identifies the machine by adding
    uuid.getnode() + int(time.time() * 1e3)
    This is a 48-bit number. It converts the number into 16 8-bit tokens.
    It uses this value to initialize the entropy source ('/dev/urandom') and to seed random.
    If os.random() is not supported, it falls back to using random and issues a warning.

    Returns the 16 machine/process tokens, later XORed into web2py_uuid().
    """
    node_id = uuid.getnode()
    microseconds = int(time.time() * 1e6)
    # split the 48-bit machine+time value into 16 byte-sized tokens
    ctokens = [((node_id + microseconds) >> ((i%6)*8)) % 256 for i in range(16)]
    random.seed(node_id + microseconds)
    try:
        os.urandom(1)
        try:
            # try to add process-specific entropy
            # (writing to /dev/urandom stirs the kernel entropy pool)
            frandom = open('/dev/urandom','wb')
            try:
                frandom.write(''.join(chr(t) for t in ctokens))
            finally:
                frandom.close()
        except IOError:
            # works anyway
            pass
    except NotImplementedError:
        logger.warning(
"""Cryptographically secure session management is not possible on your system because
your system does not provide a cryptographically secure entropy source.
This is not specific to web2py; consider deploying on a different operating system.""")
    return ctokens
# machine/process entropy tokens, computed once at import time
ctokens = initialize_urandom()
def web2py_uuid():
    """
    This function follows from the following discussion:
    http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09
    It works like uuid.uuid4 except that tries to use os.urandom() if possible
    and it XORs the output with the tokens uniquely associated with this machine.
    """
    # start from 16 pseudo-random bytes...
    bytes = [random.randrange(256) for i in range(16)]
    try:
        # ...and fold in real entropy when the platform provides it
        ubytes = [ord(c) for c in os.urandom(16)] # use /dev/urandom if possible
        bytes = [bytes[i] ^ ubytes[i] for i in range(16)]
    except NotImplementedError:
        pass
    ## xor bytes with constant ctokens
    # (Python 2: the chr-join below yields a byte string, as uuid.UUID expects)
    bytes = ''.join(chr(c ^ ctokens[i]) for i,c in enumerate(bytes))
    return str(uuid.UUID(bytes=bytes, version=4))
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Provides:
- List; like list but returns None instead of IndexOutOfBounds
- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']`
"""
import cPickle
import portalocker
__all__ = ['List', 'Storage', 'Settings', 'Messages',
'StorageList', 'load_storage', 'save_storage']
class List(list):
    """
    A list whose call operator performs bounds-checked access:
    mylist(i) returns mylist[i] when 0 <= i < len(mylist), otherwise
    the supplied default (None) instead of raising IndexError.
    """
    def __call__(self, i, default=None):
        # negative indices deliberately fall through to the default
        return self[i] if 0 <= i < len(self) else default
class Storage(dict):
    """
    A Storage object is like a dictionary except `obj.foo` can be used
    in addition to `obj['foo']`.  Missing attributes read as None and
    assigning None removes the key.
    >>> o = Storage(a=1)
    >>> print o.a
    1
    >>> o['a']
    1
    >>> o.a = 2
    >>> print o['a']
    2
    >>> del o.a
    >>> print o.a
    None
    """
    def __getattr__(self, key):
        # missing attributes read as None instead of raising
        if key in self:
            return self[key]
        else:
            return None
    def __setattr__(self, key, value):
        # identity check: `== None` would invoke an arbitrary __eq__ of
        # the stored value; only a literal None should delete the key
        if value is None:
            if key in self:
                del self[key]
        else:
            self[key] = value
    def __delattr__(self, key):
        if key in self:
            del self[key]
        else:
            # call form works on Python 2 and 3 (was `raise X, msg`)
            raise AttributeError("missing key=%s" % key)
    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'
    def __getstate__(self):
        # pickle as a plain dict (attribute magic carries no extra state)
        return dict(self)
    def __setstate__(self, value):
        for (k, v) in value.items():
            self[k] = v
    def getlist(self, key):
        """Return a Storage value as a list.
        If the value is a list it will be returned as-is.
        If object is None, an empty list will be returned.
        Otherwise, [value] will be returned.
        Example output for a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlist('x')
        ['abc']
        >>> request.vars.getlist('y')
        ['abc', 'def']
        >>> request.vars.getlist('z')
        []
        """
        value = self.get(key, None)
        if isinstance(value, (list, tuple)):
            return value
        elif value is None:
            return []
        return [value]
    def getfirst(self, key):
        """Return the first or only value when given a request.vars-style key.
        If the value is a list, its first item will be returned;
        otherwise, the value will be returned as-is.
        Example output for a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getfirst('x')
        'abc'
        >>> request.vars.getfirst('y')
        'abc'
        >>> request.vars.getfirst('z')
        """
        value = self.getlist(key)
        if len(value):
            return value[0]
        return None
    def getlast(self, key):
        """Returns the last or only single value when given a request.vars-style key.
        If the value is a list, the last item will be returned;
        otherwise, the value will be returned as-is.
        Simulated output with a query string of ?x=abc&y=abc&y=def
        >>> request = Storage()
        >>> request.vars = Storage()
        >>> request.vars.x = 'abc'
        >>> request.vars.y = ['abc', 'def']
        >>> request.vars.getlast('x')
        'abc'
        >>> request.vars.getlast('y')
        'def'
        >>> request.vars.getlast('z')
        """
        value = self.getlist(key)
        if len(value):
            return value[-1]
        return None
class StorageList(Storage):
    """
    like Storage but missing elements default to [] instead of None
    (the empty list is stored on first access, so it can be appended to)
    """
    def __getattr__(self, key):
        if key not in self:
            self[key] = []
        return self[key]
def load_storage(filename):
    """Read a pickled dict from *filename* under an exclusive portalocker
    lock and return it wrapped in a Storage."""
    stream = open(filename, 'rb')
    try:
        portalocker.lock(stream, portalocker.LOCK_EX)
        contents = cPickle.load(stream)
        portalocker.unlock(stream)
    finally:
        # the file is closed even if unpickling or locking fails
        stream.close()
    return Storage(contents)
def save_storage(storage, filename):
    """Pickle the given mapping (as a plain dict) to *filename* under an
    exclusive portalocker lock."""
    stream = open(filename, 'wb')
    try:
        portalocker.lock(stream, portalocker.LOCK_EX)
        cPickle.dump(dict(storage), stream)
        portalocker.unlock(stream)
    finally:
        # the file is closed even if pickling or locking fails
        stream.close()
class Settings(Storage):
    """Storage that can freeze its key set (lock_keys) or all of its
    values (lock_values); violations raise SyntaxError, which is the
    exception type existing callers catch."""
    def __setattr__(self, key, value):
        # call form of raise works on Python 2 and 3 (was `raise X, msg`)
        if key != 'lock_keys' and self.get('lock_keys', None)\
                and key not in self:
            raise SyntaxError("setting key '%s' does not exist" % key)
        if key != 'lock_values' and self.get('lock_values', None):
            raise SyntaxError('setting value cannot be changed: %s' % key)
        self[key] = value
class Messages(Storage):
    """Settings-like Storage whose string values are run through the
    stored T translator on attribute access."""
    def __init__(self, T):
        # keep the translator inside the dict so __getattr__ can reach it
        self['T'] = T
    def __setattr__(self, key, value):
        # call form of raise works on Python 2 and 3 (was `raise X, msg`)
        if key != 'lock_keys' and self.get('lock_keys', None)\
                and key not in self:
            raise SyntaxError("setting key '%s' does not exist" % key)
        if key != 'lock_values' and self.get('lock_values', None):
            raise SyntaxError('setting value cannot be changed: %s' % key)
        self[key] = value
    def __getattr__(self, key):
        value = self[key]
        # only plain strings are translated; other values pass through
        if isinstance(value, str):
            return str(self['T'](value))
        return value
if __name__ == '__main__':
    # running this module directly executes the doctests defined above
    import doctest
    doctest.testmod()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""
import re
# pattern to find defined tables (e.g. db.define_table('name', ...))
regex_tables = re.compile(\
    """^[\w]+\.define_table\(\s*[\'\"](?P<name>[\w_]+)[\'\"]""",
    flags=re.M)
# pattern to find exposed functions in controller
# (zero-argument defs whose name starts with a letter/digit or a
# single leading underscore)
regex_expose = re.compile(\
    '^def\s+(?P<name>(?:[a-zA-Z0-9]\w*)|(?:_[a-zA-Z0-9]\w*))\(\)\s*:',
    flags=re.M)
# pattern to find {{include 'file'}} directives in views
regex_include = re.compile(\
    '(?P<all>\{\{\s*include\s+[\'"](?P<name>[^\'"]*)[\'"]\s*\}\})')
# pattern to find {{extend 'layout'}} directives (start of line only)
regex_extend = re.compile(\
    '^\s*(?P<all>\{\{\s*extend\s+[\'"](?P<name>[^\'"]+)[\'"]\s*\}\})',re.MULTILINE)
| Python |
#!/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Thanks to
* Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
* Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
* Denes
* Chris Clark
* clach05
* Denes Lengyel
* and many others who have contributed to current and previous versions
This file contains the DAL support for many relational databases,
including:
- SQLite
- MySQL
- Postgres
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql
Example of usage:
>>> # from dal import DAL, Field
### create DAL connection (and create DB if not exists)
>>> db=DAL(('mysql://a:b@locahost/x','sqlite://storage.sqlite'),folder=None)
### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person',Field('name','string'))
### insert a record
>>> id = person.insert(name='James')
### retrieve it by id
>>> james = person(id)
### retrieve it by name
>>> james = person(name='James')
### retrieve it by arbitrary query
>>> query = (person.name=='James')&(person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]
### update one record
>>> james.update_record(name='Jim')
### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1
### delete records by query
>>> db(person.name.lower()=='jim').delete()
0
### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name,groupby=person.name,limitby=(0,100))
### further filter them
>>> james = people.find(lambda row: row.name=='James').first()
>>> print james.id, james.name
1 James
### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1
### delete one record
>>> james.delete_record()
1
### delete (drop) entire database table
>>> person.drop()
Supported field types:
id string text boolean integer double decimal password upload blob time date datetime,
Supported DAL URI strings:
'sqlite://test.db'
'sqlite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:none@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2://DSN=dsn;UID=user;PWD=pass'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'google:datastore' # for google app engine datastore
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass' # experimental
For more info:
help(DAL)
help(Field)
"""
###################################################################################
# this file only exposes DAL and Field
###################################################################################
# public API: everything else in this module is implementation detail
__all__ = ['DAL', 'Field']
MAXCHARLENGTH = 512
INFINITY = 2**15 # not quite but reasonable default max char length
import re
import sys
import locale
import os
import types
import cPickle
import datetime
import threading
import time
import cStringIO
import csv
import copy
import socket
import logging
import copy_reg
import base64
import shutil
import marshal
import decimal
import struct
import urllib
import hashlib
import uuid
import glob
CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
###################################################################################
# following checks allows running of dal without web2py as a standalone module
###################################################################################
try:
    from utils import web2py_uuid
except ImportError:
    import uuid
    # standalone fallback: web2py's uuid helper is unavailable, use plain uuid4
    def web2py_uuid(): return str(uuid.uuid4())
try:
    import portalocker
    have_portalocker = True
except ImportError:
    have_portalocker = False
try:
    import serializers
    have_serializers = True
except ImportError:
    have_serializers = False
try:
    import validators
    have_validators = True
except ImportError:
    have_validators = False
logger = logging.getLogger("web2py.dal")
DEFAULT = lambda:0
# guards the shared, cross-thread connection pools
sql_locker = threading.RLock()
# per-thread storage: open adapter instances and the working folder
thread = threading.local()
# internal representation of tables with field
# <table>.<field>, tables and fields may only be [a-zA-Z0-9_]
regex_dbname = re.compile('^(\w+)(\:\w+)*')
table_field = re.compile('^[\w_]+\.[\w_]+$')
regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
regex_cleanup_fn = re.compile('[\'"\s;]+')
string_unpack=re.compile('(?<!\|)\|(?!\|)')
regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
# list of drivers will be built on the fly
# and lists only what is available
drivers = []
try:
    from new import classobj
    from google.appengine.ext import db as gae
    from google.appengine.api import namespace_manager, rdbms
    from google.appengine.api.datastore_types import Key ### needed for belongs on ID
    from google.appengine.ext.db.polymodel import PolyModel
    drivers.append('google')
except ImportError:
    pass
if not 'google' in drivers:
    try:
        from pysqlite2 import dbapi2 as sqlite3
        drivers.append('pysqlite2')
    except ImportError:
        try:
            from sqlite3 import dbapi2 as sqlite3
            drivers.append('SQLite3')
        except ImportError:
            logger.debug('no sqlite3 or pysqlite2.dbapi2 driver')
try:
    import contrib.pymysql as pymysql
    drivers.append('pymysql')
except ImportError:
    logger.debug('no pymysql driver')
try:
    import psycopg2
    drivers.append('PostgreSQL')
except ImportError:
    logger.debug('no psycopg2 driver')
try:
    import cx_Oracle
    drivers.append('Oracle')
except ImportError:
    logger.debug('no cx_Oracle driver')
try:
    import pyodbc
    drivers.append('MSSQL/DB2')
except ImportError:
    logger.debug('no MSSQL/DB2 driver')
try:
    import kinterbasdb
    drivers.append('Interbase')
except ImportError:
    logger.debug('no kinterbasdb driver')
try:
    import firebirdsql
    drivers.append('Firebird')
except ImportError:
    logger.debug('no Firebird driver')
try:
    import informixdb
    drivers.append('Informix')
    logger.warning('Informix support is experimental')
except ImportError:
    logger.debug('no informixdb driver')
try:
    import sapdb
    drivers.append('SAPDB')
    logger.warning('SAPDB support is experimental')
except ImportError:
    logger.debug('no sapdb driver')
try:
    import cubriddb
    drivers.append('Cubrid')
    logger.warning('Cubrid support is experimental')
except ImportError:
    logger.debug('no cubriddb driver')
try:
    from com.ziclix.python.sql import zxJDBC
    import java.sql
    # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
    from org.sqlite import JDBC # required by java.sql; ensure we have it
    drivers.append('zxJDBC')
    logger.warning('zxJDBC support is experimental')
    is_jdbc = True
except ImportError:
    logger.debug('no zxJDBC driver')
    is_jdbc = False
try:
    import ingresdbi
    drivers.append('Ingres')
except ImportError:
    logger.debug('no Ingres driver')
# NOTE could try JDBC.......
try:
    import couchdb
    drivers.append('CouchDB')
except ImportError:
    logger.debug('no couchdb driver')
try:
    import pymongo
    drivers.append('mongoDB')
except:
    # NOTE(review): bare except, unlike the ImportError-only handlers above;
    # this also hides unrelated errors raised while importing pymongo
    logger.debug('no mongoDB driver')
if 'google' in drivers:
    is_jdbc = False
class GAEDecimalProperty(gae.Property):
"""
GAE decimal implementation
"""
data_type = decimal.Decimal
def __init__(self, precision, scale, **kwargs):
super(GAEDecimalProperty, self).__init__(self, **kwargs)
d = '1.'
for x in range(scale):
d += '0'
self.round = decimal.Decimal(d)
def get_value_for_datastore(self, model_instance):
value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance)
if value:
return str(value)
else:
return None
def make_value_from_datastore(self, value):
if value:
return decimal.Decimal(value).quantize(self.round)
else:
return None
def validate(self, value):
value = super(GAEDecimalProperty, self).validate(value)
if value is None or isinstance(value, decimal.Decimal):
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
raise gae.BadValueError("Property %s must be a Decimal or string." % self.name)
###################################################################################
# class that handles connection pooling (all adapters derived form this one)
###################################################################################
class ConnectionPool(object):
    """
    Mixin that gives adapters per-URI connection pooling.

    Pools are shared across threads (class-level `pools` dict keyed by
    connection URI) and guarded by the module-level `sql_locker`; open
    adapter instances are tracked per thread in `thread.instances`.
    """

    pools = {}

    @staticmethod
    def set_folder(folder):
        # remember the working folder for the current thread
        thread.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread
    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        if not hasattr(thread,'instances'):
            return
        while thread.instances:
            instance = thread.instances.pop()
            getattr(instance,action)()
            # ## if you want pools, recycle this connection
            really = True
            if instance.pool_size:
                sql_locker.acquire()
                try:
                    # return the connection to its pool unless the pool is full
                    pool = ConnectionPool.pools[instance.uri]
                    if len(pool) < instance.pool_size:
                        pool.append(instance.connection)
                        really = False
                finally:
                    # BUGFIX: release even if the pool lookup raises,
                    # otherwise sql_locker is held forever (deadlock)
                    sql_locker.release()
            if really:
                getattr(instance,'close')()
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        if hasattr(thread,'folder'):
            self.folder = thread.folder
        else:
            self.folder = thread.folder = ''
        # Creating the folder if it does not exist (intentionally disabled)
        if False and self.folder and not os.path.exists(self.folder):
            os.mkdir(self.folder)

    def pool_connection(self, f):
        """
        Obtain a connection: reuse one pooled for self.uri when available,
        otherwise create a fresh one by calling f().
        """
        if not self.pool_size:
            self.connection = f()
        else:
            uri = self.uri
            sql_locker.acquire()
            try:
                if not uri in ConnectionPool.pools:
                    ConnectionPool.pools[uri] = []
                if ConnectionPool.pools[uri]:
                    self.connection = ConnectionPool.pools[uri].pop()
                else:
                    self.connection = None
            finally:
                # BUGFIX: guarantee the lock is released on any error above
                sql_locker.release()
            if self.connection is None:
                # create the new connection outside the critical section
                self.connection = f()
        # register this instance so close_all_instances() can find it
        if not hasattr(thread,'instances'):
            thread.instances = []
        thread.instances.append(self)
###################################################################################
# this is a generic adapter that does nothing; all others are derived form this one
###################################################################################
class BaseAdapter(ConnectionPool):
driver = None
maxcharlength = INFINITY
commit_on_alter_table = False
support_distributed_transaction = False
uploads_in_blob = False
types = {
'boolean': 'CHAR(1)',
'string': 'CHAR(%(length)s)',
'text': 'TEXT',
'password': 'CHAR(%(length)s)',
'blob': 'BLOB',
'upload': 'CHAR(%(length)s)',
'integer': 'INTEGER',
'double': 'DOUBLE',
'decimal': 'DOUBLE',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
}
    def integrity_error(self):
        """Return the driver's IntegrityError exception class."""
        return self.driver.IntegrityError
    def file_exists(self, filename):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        return os.path.exists(filename)
    def file_open(self, filename, mode='rb', lock=True):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        # shared lock for reads, exclusive lock for writes, when the
        # optional portalocker module is available
        fileobj = open(filename,mode)
        if have_portalocker and lock:
            if mode in ('r','rb'):
                portalocker.lock(fileobj,portalocker.LOCK_SH)
            elif mode in ('w','wb','a'):
                portalocker.lock(fileobj,portalocker.LOCK_EX)
            else:
                fileobj.close()
                raise RuntimeError, "Unsupported file_open mode"
        return fileobj
    def file_close(self, fileobj, unlock=True):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        # tolerates None so callers can pass an optional logfile handle
        if fileobj:
            if have_portalocker and unlock:
                portalocker.unlock(fileobj)
            fileobj.close()
    def file_delete(self, filename):
        # thin wrapper so GAE adapters can override file removal
        os.unlink(filename)
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.dbengine = "None"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
class Dummy(object):
lastrowid = 1
def __getattr__(self, value):
return lambda *a, **b: []
self.connection = Dummy()
self.cursor = Dummy()
def sequence_name(self,tablename):
return '%s_sequence' % tablename
def trigger_name(self,tablename):
return '%s_sequence' % tablename
    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        """
        Build and (optionally) execute the CREATE TABLE statement for `table`.

        Returns the generated SQL string. When `migrate` is enabled this also
        maintains the .table metadata file and calls migrate_table() when the
        stored field definitions differ from the current model.
        `fake_migrate` records the migration without running any SQL;
        `polymodel` is unused here (GAE adapters use it).
        """
        fields = []
        sql_fields = {}
        sql_fields_aux = {}
        # table-level foreign keys: rtablename -> {rfieldname: fieldname}
        TFK = {}
        tablename = table._tablename
        sortable = 0
        for field in table:
            sortable += 1
            k = field.name
            if isinstance(field.type,SQLCustomType):
                ftype = field.type.native or field.type.type
            elif field.type.startswith('reference'):
                referenced = field.type[10:].strip()
                constraint_name = self.constraint_name(tablename, field.name)
                if hasattr(table,'_primarykey'):
                    rtablename,rfieldname = referenced.split('.')
                    rtable = table._db[rtablename]
                    rfield = rtable[rfieldname]
                    # must be PK reference or unique
                    if rfieldname in rtable._primarykey or rfield.unique:
                        ftype = self.types[rfield.type[:9]] % dict(length=rfield.length)
                        # multicolumn primary key reference?
                        if not rfield.unique and len(rtable._primarykey)>1 :
                            # then it has to be a table level FK
                            if rtablename not in TFK:
                                TFK[rtablename] = {}
                            TFK[rtablename][rfieldname] = field.name
                        else:
                            ftype = ftype + \
                                self.types['reference FK'] %dict(\
                                constraint_name=constraint_name,
                                table_name=tablename,
                                field_name=field.name,
                                foreign_key='%s (%s)'%(rtablename, rfieldname),
                                on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id'
                    ftype = self.types[field.type[:9]]\
                        % dict(table_name=tablename,
                               field_name=field.name,
                               constraint_name=constraint_name,
                               foreign_key=referenced + ('(%s)' % id_fieldname),
                               on_delete_action=field.ondelete)
            elif field.type.startswith('list:reference'):
                ftype = self.types[field.type[:14]]
            elif field.type.startswith('decimal'):
                precision, scale = map(int,field.type[8:-1].split(','))
                ftype = self.types[field.type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif not field.type in self.types:
                raise SyntaxError, 'Field: unknown field type: %s for %s' % \
                    (field.type, field.name)
            else:
                ftype = self.types[field.type]\
                     % dict(length=field.length)
            if not field.type.startswith('id') and not field.type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
            # add to list of fields
            sql_fields[field.name] = dict(sortable=sortable,
                                          type=str(field.type),
                                          sql=ftype)
            if isinstance(field.default,(str,int,float)):
                # caveat: sql_fields and sql_fields_aux differ for default values
                # sql_fields is used to trigger migrations and sql_fields_aux
                # are used for create table
                # the reason is that we do not want to trigger a migration simply
                # because a default value changes
                not_null = self.NOT_NULL(field.default,field.type)
                ftype = ftype.replace('NOT NULL',not_null)
            sql_fields_aux[field.name] = dict(sql=ftype)
            fields.append('%s %s' % (field.name, ftype))
        other = ';'
        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'
        fields = ',\n    '.join(fields)
        # append table-level composite foreign keys collected above
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = table._db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                self.types['reference TFK'] %\
                dict(table_name=tablename,
                     field_name=', '.join(fkeys),
                     foreign_table=rtablename,
                     foreign_key=', '.join(pkeys),
                     on_delete_action=field.ondelete)
        if hasattr(table,'_primarykey'):
            query = '''CREATE TABLE %s(\n    %s,\n    %s) %s''' % \
                (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = '''CREATE TABLE %s(\n    %s\n)%s''' % \
                (tablename, fields, other)
        if self.uri.startswith('sqlite:///'):
            # migration files live next to the sqlite database file
            path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder
        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            # caller supplied an explicit metadata filename
            table._dbt = os.path.join(dbpath, migrate)
        else:
            table._dbt = os.path.join(dbpath, '%s_%s.table' \
                                          % (table._db._uri_hash, tablename))
        if table._dbt:
            table._loggername = os.path.join(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            # first run for this table: create it and record the definitions
            if table._dbt:
                logfile.write('timestamp: %s\n'
                               % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            # table already known: diff stored definitions against the model
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = cPickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError, 'File %s appears corrupted' % table._dbt
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """
        Reconcile the live table with the current model by issuing ALTER
        TABLE statements for added/removed/changed columns, keeping the
        .table metadata file in sync.

        sql_fields: current definitions; sql_fields_old: definitions stored
        in the .table file; sql_fields_aux: definitions with defaults baked
        in (used for the actual SQL). `fake_migrate` only updates metadata.
        """
        tablename = table._tablename
        def fix(item):
            # normalize legacy metadata entries that stored a bare sql string
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unkown',sql=v)
            return k.lower(),v
        ### make sure all field names are lower case to avoid conflicts
        sql_fields = dict(map(fix,sql_fields.items()))
        sql_fields_old = dict(map(fix,sql_fields_old.items()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.items()))
        # union of current and previously-known field names
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        if self.dbengine == 'mssql':
            new_add = '; ALTER TABLE %s ADD ' % tablename
        else:
            new_add = ', ADD '
        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                # new column
                sql_fields_current[key] = sql_fields[key]
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine == 'sqlite':
                # sqlite cannot drop/alter columns; only track metadata
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                # removed column
                del sql_fields_current[key]
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                  and not isinstance(table[key].type, SQLCustomType) \
                  and not (table[key].type.startswith('reference') and \
                           sql_fields[key]['sql'].startswith('INT,') and \
                           sql_fields_old[key]['sql'].startswith('INT NOT NULL,')):
                # changed column definition: copy data through a __tmp column
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # only the logical type changed; no SQL required
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            if query:
                logfile.write('timestamp: %s\n'
                               % datetime.datetime.today().isoformat())
                table._db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # caveat. mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if table._db._adapter.commit_on_alter_table:
                            table._db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            cPickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)
        if metadata_change and \
                not (query and self.dbengine in ('mysql','oracle','firebird')):
            table._db.commit()
            tfile = self.file_open(table._dbt, 'w')
            cPickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
    # ## basic SQL fragment builders shared by all adapters; engine-specific
    # ## adapters override the ones whose syntax differs
    def LOWER(self,first):
        return 'LOWER(%s)' % self.expand(first)
    def UPPER(self,first):
        return 'UPPER(%s)' % self.expand(first)
    def EXTRACT(self,first,what):
        return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
    def AGGREGATE(self,first,what):
        # e.g. AGGREGATE(field, 'MAX') -> 'MAX(field)'
        return "%s(%s)" % (what,self.expand(first))
    def JOIN(self):
        return 'JOIN'
    def LEFT_JOIN(self):
        return 'LEFT JOIN'
    def RANDOM(self):
        return 'Random()'
    def NOT_NULL(self,default,field_type):
        return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
    def COALESCE_ZERO(self,first):
        return 'COALESCE(%s,0)' % self.expand(first)
    def ALLOW_NULL(self):
        # most engines need no explicit NULL marker
        return ''
    def SUBSTRING(self,field,parameters):
        return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY(%s)' % key
def _drop(self,table,mode):
return ['DROP TABLE %s;' % table]
def drop(self, table, mode=''):
if table._dbt:
logfile = self.file_open(table._loggername, 'a')
queries = self._drop(table, mode)
for query in queries:
if table._dbt:
logfile.write(query + '\n')
self.execute(query)
table._db.commit()
del table._db[table._tablename]
del table._db.tables[table._db.tables.index(table._tablename)]
table._db._update_referenced_by(table._tablename)
if table._dbt:
self.file_delete(table._dbt)
logfile.write('success!\n')
def _insert(self,table,fields):
keys = ','.join(f.name for f,v in fields)
values = ','.join(self.expand(v,f.type) for f,v in fields)
return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)
    def insert(self,table,fields):
        """
        Execute an INSERT. Returns None on an integrity error, a dict of
        primary-key values for keyed tables, the raw id when it is not an
        int, or a Reference wrapping the new id.
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception, e:
            # duplicate keys etc. are reported as None rather than raised
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
    def bulk_insert(self,table,items):
        # naive default: one INSERT per item; adapters with a native bulk
        # API (e.g. GAE) override this
        return [self.insert(table,item) for item in items]
    # ## SQL operator builders: each returns a fragment of SQL text;
    # ## operands are serialized via expand()
    def NOT(self,first):
        return '(NOT %s)' % self.expand(first)
    def AND(self,first,second):
        return '(%s AND %s)' % (self.expand(first),self.expand(second))
    def OR(self,first,second):
        return '(%s OR %s)' % (self.expand(first),self.expand(second))
    def BELONGS(self,first,second):
        if isinstance(second,str):
            # nested-select SQL string; strip its trailing ';'
            return '(%s IN (%s))' % (self.expand(first),second[:-1])
        elif second==[] or second==():
            # empty sequence: always-false condition
            return '(0)'
        items =','.join(self.expand(item,first.type) for item in second)
        return '(%s IN (%s))' % (self.expand(first),items)
    def LIKE(self,first,second):
        return '(%s LIKE %s)' % (self.expand(first),self.expand(second,'string'))
    def STARTSWITH(self,first,second):
        return '(%s LIKE %s)' % (self.expand(first),self.expand(second+'%','string'))
    def ENDSWITH(self,first,second):
        return '(%s LIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))
    def CONTAINS(self,first,second):
        # NOTE(review): `key` stays unbound when first.type is neither
        # string/text nor list:* - such fields raise UnboundLocalError here
        if first.type in ('string','text'):
            key = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            # list:* values are stored |item|item|... hence the |%s| pattern
            key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
        return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string'))
    def EQ(self,first,second=None):
        if second is None:
            return '(%s IS NULL)' % self.expand(first)
        return '(%s = %s)' % (self.expand(first),self.expand(second,first.type))
    def NE(self,first,second=None):
        if second is None:
            return '(%s IS NOT NULL)' % self.expand(first)
        return '(%s <> %s)' % (self.expand(first),self.expand(second,first.type))
    def LT(self,first,second=None):
        return '(%s < %s)' % (self.expand(first),self.expand(second,first.type))
    def LE(self,first,second=None):
        return '(%s <= %s)' % (self.expand(first),self.expand(second,first.type))
    def GT(self,first,second=None):
        return '(%s > %s)' % (self.expand(first),self.expand(second,first.type))
    def GE(self,first,second=None):
        return '(%s >= %s)' % (self.expand(first),self.expand(second,first.type))
    def ADD(self,first,second):
        return '(%s + %s)' % (self.expand(first),self.expand(second,first.type))
    def SUB(self,first,second):
        return '(%s - %s)' % (self.expand(first),self.expand(second,first.type))
    def MUL(self,first,second):
        return '(%s * %s)' % (self.expand(first),self.expand(second,first.type))
    def DIV(self,first,second):
        return '(%s / %s)' % (self.expand(first),self.expand(second,first.type))
    def MOD(self,first,second):
        return '(%s %% %s)' % (self.expand(first),self.expand(second,first.type))
    def AS(self,first,second):
        return '%s AS %s' % (self.expand(first),second)
    def ON(self,first,second):
        return '%s ON %s' % (self.expand(first),self.expand(second))
    def INVERT(self,first):
        return '%s DESC' % self.expand(first)
    def COMMA(self,first,second):
        return '%s, %s' % (self.expand(first),self.expand(second))
    def expand(self,expression,field_type=None):
        # recursively serialize a Field/Expression/Query tree into SQL text;
        # plain values are rendered via represent() when a field_type is known
        if isinstance(expression,Field):
            return str(expression)
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)
    def alias(self,table,alias):
        """
        given a table object, makes a new table object
        with alias name.
        """
        other = copy.copy(table)
        # remember the original table name for SQL generation
        other['_ot'] = other._tablename
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            # fields must be cloned too, so they point at the aliased table
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        # register the alias on the DAL so db[alias] works
        table._db[alias] = other
        return other
def _truncate(self,table,mode = ''):
tablename = table._tablename
return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
    def truncate(self,table,mode= ' '):
        """Empty `table` via _truncate(), logging to sql.log when available."""
        # Prepare functions "write_to_logfile" and "close_logfile"
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            class Logfile(object):
                # null logger used when there is no migration file
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()
        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            logfile.close()
def _update(self,tablename,query,fields):
if query:
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
sql_v = ','.join(['%s=%s' % (field.name, self.expand(value,field.type)) for (field,value) in fields])
return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
def update(self,tablename,query,fields):
sql = self._update(tablename,query,fields)
self.execute(sql)
try:
return self.cursor.rowcount
except:
return None
def _delete(self,tablename, query):
if query:
sql_w = ' WHERE ' + self.expand(query)
else:
sql_w = ''
return 'DELETE FROM %s%s;' % (tablename, sql_w)
    def delete(self,tablename,query):
        """
        Execute a DELETE; returns the number of deleted rows when the driver
        reports it. Emulates ON DELETE CASCADE for sqlite.
        """
        sql = self._delete(tablename,query)
        ### special code to handle CASCADE in SQLite
        db = self.db
        table = db[tablename]
        if self.dbengine=='sqlite' and table._referenced_by:
            # remember the ids about to disappear so referencing rows can be purged
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite
        if self.dbengine=='sqlite' and counter:
            for tablename,fieldname in table._referenced_by:
                f = db[tablename][fieldname]
                if f.type=='reference '+table._tablename and f.ondelete=='CASCADE':
                    db(db[tablename][fieldname].belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite
        return counter
    def get_table(self,query):
        """Return the single table name used by `query`; raise otherwise."""
        tablenames = self.tables(query)
        if len(tablenames)==1:
            return tablenames[0]
        elif len(tablenames)<1:
            raise RuntimeError, "No table selected"
        else:
            raise RuntimeError, "Too many tables selected"
    def _select(self, query, fields, attributes):
        """
        Build (but do not execute) the SELECT statement for `fields`
        restricted by `query`. Supported attributes: orderby, groupby,
        limitby, required, cache, left, distinct, having, join.
        Side effect: records the output column names in self._colnames.
        """
        for key in set(attributes.keys())-set(('orderby','groupby','limitby',
                                               'required','cache','left',
                                               'distinct','having', 'join')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        # ## if not fields specified take them all from the requested tables
        new_fields = []
        for item in fields:
            if isinstance(item,SQLALL):
                # table.ALL expands to every field of that table
                new_fields += item.table
            else:
                new_fields.append(item)
        fields = new_fields
        tablenames = self.tables(query)
        query = self.filter_tenant(query,tablenames)
        if not fields:
            for table in tablenames:
                for field in self.db[table]:
                    fields.append(field)
        else:
            for field in fields:
                if isinstance(field,basestring) and table_field.match(field):
                    # 'table.field' strings are resolved to Field objects
                    tn,fn = field.split('.')
                    field = self.db[tn][fn]
                for tablename in self.tables(field):
                    if not tablename in tablenames:
                        tablenames.append(tablename)
        if len(tablenames) < 1:
            raise SyntaxError, 'Set: no tables selected'
        sql_f = ', '.join(map(self.expand,fields))
        self._colnames = [c.strip() for c in sql_f.split(', ')]
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_o = ''
        sql_s = ''
        left = attributes.get('left', False)
        inner_join = attributes.get('join', False)
        distinct = attributes.get('distinct', False)
        groupby = attributes.get('groupby', False)
        orderby = attributes.get('orderby', False)
        having = attributes.get('having', False)
        limitby = attributes.get('limitby', False)
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            # split join arguments into plain tables and ON expressions
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            ijoinont = [t.first._tablename for t in ijoinon]
            iexcluded = [t for t in tablenames if not t in ijoint + ijoinont]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join if not isinstance(t,Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames if not t in important_tablenames ]
        def alias(t):
            # render a table with its AS clause when the table is aliased
            return str(self.db[t])
        if inner_join and not left:
            sql_t = ', '.join(alias(t) for t in iexcluded)
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
        elif not inner_join and left:
            sql_t = ', '.join([alias(t) for t in excluded + tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        elif inner_join and left:
            sql_t = ','.join([alias(t) for t in excluded + \
                                  tables_to_merge.keys() if t in iexcluded ])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            if not orderby and tablenames:
                # a deterministic ORDER BY is required for stable paging
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])])
        # oracle does not support limitby
        return self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def select(self,query,fields,attributes):
"""
Always returns a Rows object, even if it may be empty
"""
def response(sql):
self.execute(sql)
return self.cursor.fetchall()
sql = self._select(query,fields,attributes)
if attributes.get('cache', None):
(cache_model, time_expire) = attributes['cache']
del attributes['cache']
key = self.uri + '/' + sql
key = (key<=200) and key or hashlib.md5(key).hexdigest()
rows = cache_model(key, lambda: response(sql), time_expire)
else:
rows = response(sql)
if isinstance(rows,tuple):
rows = list(rows)
limitby = attributes.get('limitby',None) or (0,)
rows = self.rowslice(rows,limitby[0],None)
return self.parse(rows,self._colnames)
    def _count(self,query,distinct=None):
        """Build a SELECT count(...) statement for the rows matching `query`."""
        tablenames = self.tables(query)
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_t = ','.join(tablenames)
        if distinct:
            if isinstance(distinct,(list,tuple)):
                distinct = xorify(distinct)
            sql_d = self.expand(distinct)
            return 'SELECT count(DISTINCT %s) FROM %s%s' % (sql_d, sql_t, sql_w)
        return 'SELECT count(*) FROM %s%s' % (sql_t, sql_w)
def count(self,query,distinct=None):
self.execute(self._count(query,distinct))
return self.cursor.fetchone()[0]
    def tables(self,query):
        """Return the list of table names referenced by a Field/Expression/Query tree."""
        tables = set()
        if isinstance(query, Field):
            tables.add(query.tablename)
        elif isinstance(query, (Expression, Query)):
            # recurse into both operands of the expression tree
            if query.first!=None:
                tables = tables.union(self.tables(query.first))
            if query.second!=None:
                tables = tables.union(self.tables(query.second))
        return list(tables)
    def commit(self):
        """Commit the current transaction on the underlying connection."""
        return self.connection.commit()
    def rollback(self):
        """Roll back the current transaction."""
        return self.connection.rollback()
    def close(self):
        """Close the underlying connection."""
        return self.connection.close()
    def distributed_transaction_begin(self,key):
        # no-op by default; adapters supporting 2-phase commit override this
        return
    def prepare(self,key):
        # first phase of a distributed transaction
        self.connection.prepare()
    def commit_prepared(self,key):
        self.connection.commit()
    def rollback_prepared(self,key):
        self.connection.rollback()
    def concat_add(self,table):
        # fragment used to chain several ADD clauses in one ALTER TABLE
        return ', ADD '
    def constraint_name(self, table, fieldname):
        # deterministic name for the FK constraint on table.fieldname
        return '%s_%s__constraint' % (table,fieldname)
    def create_sequence_and_triggers(self, query, table, **args):
        # default: just run the CREATE TABLE; engines that need sequences
        # and triggers for autoincrement ids override this
        self.execute(query)
    def log_execute(self,*a,**b):
        # run a query while recording the SQL text and its wall-clock timing
        self.db._lastsql = a[0]
        t0 = time.time()
        ret = self.cursor.execute(*a,**b)
        self.db._timings.append((a[0],time.time()-t0))
        return ret
    def execute(self,*a,**b):
        return self.log_execute(*a, **b)
    def represent(self, obj, fieldtype):
        """
        Convert a python value into an SQL literal for a column of
        `fieldtype`; the result is embedded directly into the SQL text.
        """
        # callable defaults are invoked to obtain the actual value
        if isinstance(obj,CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if fieldtype.startswith('list:'):
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if isinstance(obj, (list, tuple)):
            # list:* values are serialized as |item|item|...
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        # empty string means NULL except for string/text/password/upload
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']:
            return 'NULL'
        # give the concrete adapter a chance to handle special cases first
        r = self.represent_exceptions(obj,fieldtype)
        if r != None:
            return r
        if fieldtype == 'boolean':
            if obj and not str(obj)[:1].upper() in ['F', '0']:
                return "'T'"
            else:
                return "'F'"
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(int(obj))
        if fieldtype.startswith('decimal'):
            return str(obj)
        elif fieldtype.startswith('reference'): # reference
            if fieldtype.find('.')>0:
                return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(int(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                # NOTE(review): [:10] keeps HH:MM:SS plus up to one fractional
                # digit when microseconds are present - confirm this is intended
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        if not isinstance(obj,str):
            obj = str(obj)
        try:
            obj.decode(self.db_codec)
        except:
            # re-encode values that are not valid in the target codec
            obj = obj.decode('latin1').encode(self.db_codec)
        # single quotes are doubled to keep the literal well-formed
        return "'%s'" % obj.replace("'", "''")
def represent_exceptions(self, obj, fieldtype):
return None
def lastrowid(self,table):
return None
def integrity_error_class(self):
return type(None)
def rowslice(self,rows,minimum=0,maximum=None):
""" by default this function does nothing, overload when db does not do slicing """
return rows
def parse(self, rows, colnames, blob_decode=True):
db = self.db
virtualtables = []
new_rows = []
for (i,row) in enumerate(rows):
new_row = Row()
for j,colname in enumerate(colnames):
value = row[j]
if not table_field.match(colnames[j]):
if not '_extra' in new_row:
new_row['_extra'] = Row()
new_row['_extra'][colnames[j]] = value
select_as_parser = re.compile("\s+AS\s+(\S+)")
new_column_name = select_as_parser.search(colnames[j])
if not new_column_name is None:
column_name = new_column_name.groups(0)
setattr(new_row,column_name[0],value)
continue
(tablename, fieldname) = colname.split('.')
table = db[tablename]
field = table[fieldname]
field_type = field.type
if field.type != 'blob' and isinstance(value, str):
try:
value = value.decode(db._db_codec)
except Exception:
pass
if isinstance(value, unicode):
value = value.encode('utf-8')
if not tablename in new_row:
colset = new_row[tablename] = Row()
if tablename not in virtualtables:
virtualtables.append(tablename)
else:
colset = new_row[tablename]
if isinstance(field_type, SQLCustomType):
colset[fieldname] = field_type.decoder(value)
# field_type = field_type.type
elif not isinstance(field_type, str) or value is None:
colset[fieldname] = value
elif isinstance(field_type, str) and \
field_type.startswith('reference'):
referee = field_type[10:].strip()
if not '.' in referee:
colset[fieldname] = rid = Reference(value)
(rid._table, rid._record) = (db[referee], None)
else: ### reference not by id
colset[fieldname] = value
elif field_type == 'boolean':
if value == True or str(value)[:1].lower() == 't':
colset[fieldname] = True
else:
colset[fieldname] = False
elif field_type == 'date' \
and (not isinstance(value, datetime.date)\
or isinstance(value, datetime.datetime)):
(y, m, d) = map(int, str(value)[:10].strip().split('-'))
colset[fieldname] = datetime.date(y, m, d)
elif field_type == 'time' \
and not isinstance(value, datetime.time):
time_items = map(int,str(value)[:8].strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
colset[fieldname] = datetime.time(h, mi, s)
elif field_type == 'datetime'\
and not isinstance(value, datetime.datetime):
(y, m, d) = map(int,str(value)[:10].strip().split('-'))
time_items = map(int,str(value)[11:19].strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
colset[fieldname] = datetime.datetime(y, m, d, h, mi, s)
elif field_type == 'blob' and blob_decode:
colset[fieldname] = base64.b64decode(str(value))
elif field_type.startswith('decimal'):
decimals = int(field_type[8:-1].split(',')[-1])
if self.dbengine == 'sqlite':
value = ('%.' + str(decimals) + 'f') % value
if not isinstance(value, decimal.Decimal):
value = decimal.Decimal(str(value))
colset[fieldname] = value
elif field_type.startswith('list:integer'):
if not self.dbengine=='google:datastore':
colset[fieldname] = bar_decode_integer(value)
else:
colset[fieldname] = value
elif field_type.startswith('list:reference'):
if not self.dbengine=='google:datastore':
colset[fieldname] = bar_decode_integer(value)
else:
colset[fieldname] = value
elif field_type.startswith('list:string'):
if not self.dbengine=='google:datastore':
colset[fieldname] = bar_decode_string(value)
else:
colset[fieldname] = value
else:
colset[fieldname] = value
if field_type == 'id':
id = colset[field.name]
colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a)
colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete()
for (referee_table, referee_name) in \
table._referenced_by:
s = db[referee_table][referee_name]
if not referee_table in colset:
# for backward compatibility
colset[referee_table] = Set(db, s == id)
### add new feature?
### colset[referee_table+'_by_'+refree_name] = Set(db, s == id)
colset['id'] = id
new_rows.append(new_row)
rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
for tablename in virtualtables:
for item in db[tablename].virtualfields:
try:
rowsobj = rowsobj.setvirtualfields(**{tablename:item})
except KeyError:
# to avoid breaking virtualfields when partial select
pass
return rowsobj
def filter_tenant(self,query,tablenames):
fieldname = self.db._request_tenant
for tablename in tablenames:
table = self.db[tablename]
if fieldname in table:
default = table[fieldname].default
if default!=None:
query = query&(table[fieldname]==default)
return query
###################################################################################
# List of all the available adapters, they all extend BaseAdapter
###################################################################################
class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases via the (bundled) sqlite3 driver."""
    driver = globals().get('sqlite3',None)
    def EXTRACT(self,field,what):
        # SQLite has no EXTRACT(); delegate to the UDF registered in __init__
        return "web2py_extract('%s',%s)" % (what,self.expand(field))
    @staticmethod
    def web2py_extract(lookup, s):
        """UDF: pull a date/time component out of an ISO string *s*.

        *lookup* is one of 'year'..'second'; returns an int, or None on
        any lookup/parsing problem (intentionally broad except).
        """
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            (i, j) = table[lookup]
            return int(s[i:j])
        except:
            return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Parse a ``sqlite://`` URI and open a pooled connection."""
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                # relative path: anchor the db file in the working folder
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath)
        if not 'check_same_thread' in driver_args:
            # web2py serves requests from multiple threads
            driver_args['check_same_thread'] = False
        def connect(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # register the EXTRACT() replacement on this connection
        self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
    def _truncate(self,table,mode = ''):
        # emulate TRUNCATE: delete all rows and reset the autoincrement counter
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
    def lastrowid(self,table):
        # sqlite3 cursors expose the last inserted rowid directly
        return self.cursor.lastrowid
class JDBCSQLiteAdapter(SQLiteAdapter):
    """SQLite adapter for Jython, connecting through zxJDBC."""
    driver = globals().get('zxJDBC',None)
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Parse a ``sqlite://`` URI and open a pooled JDBC connection."""
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                # relative path: anchor the db file in the working folder
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath)
        def connect(dbpath=dbpath,driver_args=driver_args):
            return self.driver.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:'+dbpath),**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        # self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)
    def execute(self,a):
        # strip the trailing character (the ';'), which zxJDBC rejects
        return self.log_execute(a[:-1])
class MySQLAdapter(BaseAdapter):
driver = globals().get('pymysql',None)
maxcharlength = 255
commit_on_alter_table = True
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'LONGTEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'LONGBLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'DATETIME',
'id': 'INT AUTO_INCREMENT NOT NULL',
'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'LONGTEXT',
'list:string': 'LONGTEXT',
'list:reference': 'LONGTEXT',
}
def RANDOM(self):
return 'RAND()'
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
def _drop(self,table,mode):
# breaks db integrity but without this mysql does not drop table
return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,'SET FOREIGN_KEY_CHECKS=1;']
def distributed_transaction_begin(self,key):
self.execute('XA START;')
def prepare(self,key):
self.execute("XA END;")
self.execute("XA PREPARE;")
def commit_prepared(self,ley):
self.execute("XA COMMIT;")
def rollback_prepared(self,key):
self.execute("XA ROLLBACK;")
def concat_add(self,table):
return '; ALTER TABLE %s ADD ' % table
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.dbengine = "mysql"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self.find_or_make_work_folder()
uri = uri.split('://')[1]
m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
if not m:
raise SyntaxError, \
"Invalid URI string in DAL: %s" % self.uri
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError, 'User required'
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError, 'Host name required'
db = m.group('db')
if not db:
raise SyntaxError, 'Database name required'
port = int(m.group('port') or '3306')
charset = m.group('charset') or 'utf8'
driver_args.update(dict(db=db,
user=credential_decoder(user),
passwd=credential_decoder(password),
host=host,
port=port,
charset=charset))
def connect(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.pool_connection(connect)
self.cursor = self.connection.cursor()
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
def lastrowid(self,table):
self.execute('select last_insert_id();')
return int(self.cursor.fetchone()[0])
class PostgreSQLAdapter(BaseAdapter):
driver = globals().get('psycopg2',None)
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'SERIAL PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
}
def sequence_name(self,table):
return '%s_id_Seq' % table
def RANDOM(self):
return 'RANDOM()'
def distributed_transaction_begin(self,key):
return
def prepare(self,key):
self.execute("PREPARE TRANSACTION '%s';" % key)
def commit_prepared(self,key):
self.execute("COMMIT PREPARED '%s';" % key)
def rollback_prepared(self,key):
self.execute("ROLLBACK PREPARED '%s';" % key)
def create_sequence_and_triggers(self, query, table, **args):
# following lines should only be executed if table._sequence_name does not exist
# self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
# self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
# % (table._tablename, table._fieldname, table._sequence_name))
self.execute(query)
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.dbengine = "postgres"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self.find_or_make_work_folder()
uri = uri.split('://')[1]
m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
if not m:
raise SyntaxError, "Invalid URI string in DAL"
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError, 'User required'
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError, 'Host name required'
db = m.group('db')
if not db:
raise SyntaxError, 'Database name required'
port = m.group('port') or '5432'
sslmode = m.group('sslmode')
if sslmode:
msg = ("dbname='%s' user='%s' host='%s'"
"port=%s password='%s' sslmode='%s'") \
% (db, user, host, port, password, sslmode)
else:
msg = ("dbname='%s' user='%s' host='%s'"
"port=%s password='%s'") \
% (db, user, host, port, password)
def connect(msg=msg,driver_args=driver_args):
return self.driver.connect(msg,**driver_args)
self.pool_connection(connect)
self.connection.set_client_encoding('UTF8')
self.cursor = self.connection.cursor()
self.execute('BEGIN;')
self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
self.execute("SET standard_conforming_strings=on;")
def lastrowid(self,table):
self.execute("select currval('%s')" % table._sequence_name)
return int(self.cursor.fetchone()[0])
def LIKE(self,first,second):
return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string'))
def STARTSWITH(self,first,second):
return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string'))
def ENDSWITH(self,first,second):
return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))
def CONTAINS(self,first,second):
if first.type in ('string','text'):
key = '%'+str(second).replace('%','%%')+'%'
elif first.type.startswith('list:'):
key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython, connecting through JDBC."""
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Parse a ``postgres://user:pass@host:port/db`` URI and connect."""
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = m.group('port') or '5432'
        # (jdbc url, user, password) tuple unpacked into driver.connect
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connect(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.pool_connection(connect)
        self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via cx_Oracle.

    Oracle has no autoincrementing ids, so each table gets a sequence
    plus a BEFORE INSERT trigger (see create_sequence_and_triggers).
    """
    driver = globals().get('cx_Oracle',None)
    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }
    def sequence_name(self,tablename):
        return '%s_sequence' % tablename
    def trigger_name(self,tablename):
        return '%s_trigger' % tablename
    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'
    def RANDOM(self):
        return 'dbms_random.value'
    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
    def _drop(self,table,mode):
        # also drop the id sequence created alongside the table
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # emulate LIMIT/OFFSET with a nested ROWNUM query
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name
    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literals: CLOB markers and to_date() calls."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # ":CLOB('...')" is rewritten into a bind variable by execute()
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Connect to Oracle using the DSN part of an ``oracle://`` URI."""
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connect(uri=uri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
    # matches ":CLOB('...')" markers emitted by represent_exceptions
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
    def execute(self, command):
        """Rewrite inline CLOB markers into numbered bind variables,
        strip the trailing ';' and execute with the collected args."""
        args = []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        return self.log_execute(command[:-1], args)
    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus its id sequence and insert trigger."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name)
        self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))
    def lastrowid(self,table):
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc.

    The URI may be either a plain ODBC DSN (``mssql://mydsn``) or a full
    ``mssql://user:pass@host:port/db?arg=value`` connection spec.
    """
    driver = globals().get('pyodbc',None)
    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }
    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))
    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'
    def RANDOM(self):
        return 'NEWID()'
    def ALLOW_NULL(self):
        return ' NULL'
    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # MSSQL has no OFFSET here: only TOP is applied; the lower bound
        # is handled in Python by rowslice() below
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def represent_exceptions(self, obj, fieldtype):
        # BIT columns want 1/0 instead of 'T'/'F'
        if fieldtype == 'boolean':
            if obj and not str(obj)[0].upper() == 'F':
                return '1'
            else:
                return '0'
        return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}, fake_connect=False):
        """Build an ODBC connection string from the URI and connect.

        With ``fake_connect=True`` no connection is opened (used for
        URI validation without a live server).
        """
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        uri = uri.split('://')[1]
        if '@' not in uri:
            # no credentials: treat the whole URI as an ODBC DSN name
            try:
                m = re.compile('^(?P<dsn>.+)$').match(uri)
                if not m:
                    raise SyntaxError, \
                        'Parsing uri string(%s) has no result' % self.uri
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError, 'DSN required'
            except SyntaxError, e:
                logger.error('NdGpatch error')
                raise e
            cnxn = 'DSN=%s' % dsn
        else:
            m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri)
            if not m:
                raise SyntaxError, \
                    "Invalid URI string in DAL: %s" % uri
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError, 'User required'
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError, 'Host name required'
            db = m.group('db')
            if not db:
                raise SyntaxError, 'Database name required'
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
            for argmatch in argpattern.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        if not fake_connect:
            self.pool_connection(connect)
            self.cursor = self.connection.cursor()
    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return int(self.cursor.fetchone()[0])
    def integrity_error_class(self):
        return pyodbc.IntegrityError
    def rowslice(self,rows,minimum=0,maximum=None):
        # apply the lower bound skipped by select_limitby
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter variant using national (unicode) character types."""
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        }
    def represent(self, obj, fieldtype):
        """Prefix quoted string/text literals with N for unicode MSSQL."""
        literal = BaseAdapter.represent(self, obj, fieldtype)
        if fieldtype in ('string', 'text') and literal[:1] == "'":
            literal = 'N' + literal
        return literal
    def execute(self, a):
        # statements are decoded to unicode before execution
        return self.log_execute(a.decode('utf8'))
class FireBirdAdapter(BaseAdapter):
driver = globals().get('pyodbc',None)
commit_on_alter_table = False
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'BLOB SUB_TYPE 1',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB SUB_TYPE 0',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'double': 'DOUBLE PRECISION',
'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INTEGER PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BLOB SUB_TYPE 1',
'list:string': 'BLOB SUB_TYPE 1',
'list:reference': 'BLOB SUB_TYPE 1',
}
def sequence_name(self,tablename):
return 'genid_%s' % tablename
def trigger_name(self,tablename):
return 'trg_id_%s' % tablename
def RANDOM(self):
return 'RAND()'
def NOT_NULL(self,default,field_type):
return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
def _drop(self,table,mode):
sequence_name = table._sequence_name
return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def _truncate(self,table,mode = ''):
return ['DELETE FROM %s;' % table._tablename,
'SET GENERATOR %s TO 0;' % table._sequence_name]
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.dbengine = "firebird"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self.find_or_make_work_folder()
uri = uri.split('://')[1]
m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
if not m:
raise SyntaxError, "Invalid URI string in DAL: %s" % uri
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError, 'User required'
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError, 'Host name required'
port = int(m.group('port') or 3050)
db = m.group('db')
if not db:
raise SyntaxError, 'Database name required'
charset = m.group('charset') or 'UTF8'
driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db),
user = credential_decoder(user),
password = credential_decoder(password),
charset = charset))
if adapter_args.has_key('driver_name'):
if adapter_args['driver_name'] == 'kinterbasdb':
self.driver = kinterbasdb
elif adapter_args['driver_name'] == 'firebirdsql':
self.driver = firebirdsql
else:
self.driver = kinterbasdb
def connect(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.pool_connection(connect)
self.cursor = self.connection.cursor()
def create_sequence_and_triggers(self, query, table, **args):
tablename = table._tablename
sequence_name = table._sequence_name
trigger_name = table._trigger_name
self.execute(query)
self.execute('create generator %s;' % sequence_name)
self.execute('set generator %s to 0;' % sequence_name)
self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
def lastrowid(self,table):
sequence_name = table._sequence_name
self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
return int(self.cursor.fetchone()[0])
class FireBirdEmbeddedAdapter(FireBirdAdapter):
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.dbengine = "firebird"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self.find_or_make_work_folder()
uri = uri.split('://')[1]
m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
if not m:
raise SyntaxError, \
"Invalid URI string in DAL: %s" % self.uri
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError, 'User required'
password = credential_decoder(m.group('password'))
if not password:
password = ''
pathdb = m.group('path')
if not pathdb:
raise SyntaxError, 'Path required'
charset = m.group('charset')
if not charset:
charset = 'UTF8'
host = ''
driver_args.update(dict(host=host,
database=pathdb,
user=credential_decoder(user),
password=credential_decoder(password),
charset=charset))
#def connect(driver_args=driver_args):
# return kinterbasdb.connect(**driver_args)
if adapter_args.has_key('driver_name'):
if adapter_args['driver_name'] == 'kinterbasdb':
self.driver = kinterbasdb
elif adapter_args['driver_name'] == 'firebirdsql':
self.driver = firebirdsql
else:
self.driver = kinterbasdb
def connect(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.pool_connection(connect)
self.cursor = self.connection.cursor()
class InformixAdapter(BaseAdapter):
    """Adapter for IBM Informix via informixdb."""
    driver = globals().get('informixdb',None)
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        }
    def RANDOM(self):
        return 'Random()'
    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # SKIP/FIRST availability depends on the server version
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
    def represent_exceptions(self, obj, fieldtype):
        """Informix-specific literals: dates via to_date()."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Parse an ``informix://user:pass@host/db`` URI and connect."""
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db,host)
        driver_args.update(dict(user=user,password=password,autocommit=True))
        def connect(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
    def execute(self,command):
        # Informix rejects a trailing semicolon; strip it before executing
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)
    def lastrowid(self,table):
        # sqlerrd[1] carries the generated SERIAL value after an insert
        return self.cursor.sqlerrd[1]
    def integrity_error_class(self):
        return informixdb.IntegrityError
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2, connected through ODBC (pyodbc)."""

    # Resolved at module import time; None when pyodbc is absent.
    driver = globals().get('pyodbc',None)

    # web2py field type -> DB2 column DDL (%-format templates).
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        """DB2 spells it LEFT OUTER JOIN."""
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        """Random-ordering function used for orderby='<random>'."""
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only the upper bound is expressed in SQL (FETCH FIRST); the
        # lower bound is trimmed client-side by rowslice() below.
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Special-case blob (base64 inside BLOB()) and datetime literals;
        returning None falls back to the default representation."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            # DB2 timestamp literal format: yyyy-mm-dd-hh.mm.ss
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Everything after 'db2://' is passed to pyodbc verbatim as the
        ODBC connection string."""
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def execute(self,command):
        """Strip a trailing ';' which the ODBC driver does not accept."""
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        """Fetch the identity value generated by the last insert."""
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return int(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # Client-side offset handling; see select_limitby above.
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
class TeradataAdapter(DB2Adapter):
    """DAL adapter for Teradata via ODBC (pyodbc).

    Inherits all SQL generation from DB2Adapter; only the engine name
    and the type map are declared here.  NOTE(review): the type map is
    currently an exact copy of DB2Adapter.types.
    """

    # Resolved at module import time; None when pyodbc is absent.
    driver = globals().get('pyodbc',None)

    # web2py field type -> Teradata column DDL (%-format templates).
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Everything after 'teradata://' is the raw ODBC connection string."""
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        cnxn = uri.split('://', 1)[1]
        def connect(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
# Placeholder sequence name used in the Ingres 'id' DDL template.  It is
# deliberately an invalid object name so it can never collide with a real
# database object; IngresAdapter.create_sequence_and_triggers() replaces
# it with '<tablename>_iisq' before the DDL is executed.
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)
class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres, using the `ingresdbi` driver.

    'id' columns are backed by an explicit sequence (see INGRES_SEQNAME);
    create_sequence_and_triggers() creates the real per-table sequence and
    lastrowid() reads its current value back.
    """

    # Resolved at module import time; None when ingresdbi is absent.
    driver = globals().get('ingresdbi',None)

    # web2py field type -> Ingres column DDL (%-format templates).
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        }

    def LEFT_JOIN(self):
        """Ingres spells it LEFT OUTER JOIN."""
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        """Random-ordering function used for orderby='<random>'."""
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Implement limitby via FIRST n ... OFFSET m."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Parse ingres://dbname (local database only) and connect."""
        self.db = db
        self.dbengine = "ingres"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        # BUG FIX: this read self._uri, an attribute that is never assigned
        # (only self.uri is set above), so every connection attempt raised
        # AttributeError.
        connstr = self.uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        database_name = connstr # Assume only (local) dbname is passed in
        vnode = '(local)'
        servertype = 'ingres'
        trace = (0, None) # No tracing
        driver_args.update(dict(database=database_name,
                                vnode=vnode,
                                servertype=servertype,
                                trace=trace))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            # NOTE(review): reads table.primarykey (no underscore) while the
            # guard checks table._primarykey -- confirm both resolve on Table.
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname = '%s_iisq' % table._tablename
            query = query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        """Read back the current value of the table's id sequence."""
        tmp_seqname = '%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        # NOTE(review): refers to the module-global ingresdbi directly;
        # this raises NameError when the driver module is not importable.
        return ingresdbi.IntegrityError
class IngresUnicodeAdapter(IngresAdapter):
    """IngresAdapter variant that maps text types to the national
    (Unicode) column types NVARCHAR/NCLOB; all behavior is inherited."""

    # web2py field type -> Ingres Unicode column DDL (%-format templates).
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'integer4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        }
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB, using the `sapdb` driver."""

    # Resolved at module import time; None when sapdb is absent.
    driver = globals().get('sapdb',None)
    support_distributed_transaction = False

    # web2py field type -> SAP DB column DDL (%-format templates).
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'FLOAT',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        }

    def sequence_name(self,table):
        """Name of the sequence backing a table's id column."""
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulates limitby by wrapping the query and filtering on ROWNO.
        # NOTE(review): the inner 'WHERE ROWNO=%i' uses lmax with '=' --
        # confirm against SAP DB ROWNO semantics that this bounds the
        # result at lmax rows rather than selecting a single row.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the id sequence and wire it up as the column default."""
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
                         % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        """Parse sapdb://user:password@host/db and connect."""
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
        if not m:
            raise SyntaxError, "Invalid URI string in DAL"
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        def connect(user=user,password=password,database=db,
                    host=host,driver_args=driver_args):
            return self.driver.Connection(user,password,database,
                                          host,**driver_args)
        self.pool_connection(connect)
        # self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()

    def lastrowid(self,table):
        """Read the sequence's NEXTVAL after an insert."""
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return int(self.cursor.fetchone()[0])
class CubridAdapter(MySQLAdapter):
driver = globals().get('cubriddb',None)
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.dbengine = "cubrid"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self.find_or_make_work_folder()
uri = uri.split('://')[1]
m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
if not m:
raise SyntaxError, \
"Invalid URI string in DAL: %s" % self.uri
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError, 'User required'
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError, 'Host name required'
db = m.group('db')
if not db:
raise SyntaxError, 'Database name required'
port = int(m.group('port') or '30000')
charset = m.group('charset') or 'utf8'
user=credential_decoder(user),
passwd=credential_decoder(password),
def connect(host,port,db,user,passwd,driver_args=driver_args):
return self.driver.connect(host,port,db,user,passwd,**driver_args)
self.pool_connection(connect)
self.cursor = self.connection.cursor()
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
######## GAE MySQL ##########
class DatabaseStoredFile:
    """File-like object whose contents live in a `web2py_filesystem`
    database table rather than on disk.  Used where the application has
    no writable filesystem for .table migration metadata.
    """

    # Class-level flag: True once the backing table is known to exist
    # (shared by all instances for the process lifetime).
    web2py_filesystem = False

    def __init__(self,db,filename,mode):
        """Load `filename` from the database (or from disk as a fallback).

        mode: 'r'/'rw'/'a' load existing content; 'w' starts empty.
        Raises RuntimeError if the engine is not MySQL or the file is
        required ('r'/'rw') but absent everywhere.
        """
        if db._adapter.dbengine != 'mysql':
            raise RuntimeError, "only MySQL can store metadata .table files in database for now"
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;")
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0  # current read position
        self.data = ''
        if mode in ('r','rw','a'):
            # NOTE(review): filename is interpolated into SQL unescaped;
            # callers only pass internal .table paths, but confirm.
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif os.path.exists(filename):
                # Fall back to a copy on local disk (e.g. first migration).
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError, "File %s does not exist" % filename

    def read(self, bytes):
        """Return up to `bytes` characters from the current position."""
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        """Return the next line (including its '\\n', if present)."""
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        """Append to the in-memory buffer; persisted only on close()."""
        self.data += data

    def close(self):
        """Write the buffer back to the database (delete + insert) and
        commit.  NOTE(review): this rewrites the row even when the file
        was opened read-only."""
        self.db.executesql("DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
        query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')" % \
            (self.filename, self.data.replace("'","''"))
        self.db.executesql(query)
        self.db.commit()

    @staticmethod
    def exists(db,filename):
        """True if the file exists on local disk or in the database."""
        if os.path.exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False
class UseDatabaseStoredFile:
    """Mixin that redirects an adapter's .table-file I/O through
    DatabaseStoredFile, so migration metadata is kept in the database
    instead of on the local filesystem."""

    def file_exists(self, filename):
        """True if the file exists on disk or in web2py_filesystem."""
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        """Open `filename` as a DatabaseStoredFile; `lock` is ignored."""
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj, unlock=True):
        """Flush the stored file back to the database."""
        fileobj.close()

    def file_delete(self, filename):
        """Remove `filename`'s row from web2py_filesystem and commit."""
        db = self.db
        db.executesql("DELETE FROM web2py_filesystem WHERE path='%s'" % filename)
        db.commit()
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
def __init__(self, db, uri='google:sql://realm:domain/database', pool_size=0,
folder=None, db_codec='UTF-8', check_reserved=None,
migrate=True, fake_migrate=False,
credential_decoder = lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.dbengine = "mysql"
self.uri = uri
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self.folder = folder or '$HOME/'+thread.folder.split('/applications/',1)[1]
m = re.compile('^(?P<instance>.*)/(?P<db>.*)$').match(self.uri[len('google:sql://'):])
if not m:
raise SyntaxError, "Invalid URI string in SQLDB: %s" % self._uri
instance = credential_decoder(m.group('instance'))
db = credential_decoder(m.group('db'))
driver_args['instance'] = instance
if not migrate:
driver_args['database'] = db
def connect(driver_args=driver_args):
return rdbms.connect(**driver_args)
self.pool_connection(connect)
self.cursor = self.connection.cursor()
if migrate:
# self.execute('DROP DATABASE %s' % db)
self.execute('CREATE DATABASE IF NOT EXISTS %s' % db)
self.execute('USE %s' % db)
self.execute("SET FOREIGN_KEY_CHECKS=1;")
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
class NoSQLAdapter(BaseAdapter):
@staticmethod
def to_unicode(obj):
if isinstance(obj, str):
return obj.decode('utf8')
elif not isinstance(obj, unicode):
return unicode(obj)
return obj
def represent(self, obj, fieldtype):
if isinstance(obj,CALLABLETYPES):
obj = obj()
if isinstance(fieldtype, SQLCustomType):
return fieldtype.encoder(obj)
if isinstance(obj, (Expression, Field)):
raise SyntaxError, "non supported on GAE"
if self.dbengine=='google:datastore' in globals():
if isinstance(fieldtype, gae.Property):
return obj
if fieldtype.startswith('list:'):
if not obj:
obj = []
if not isinstance(obj, (list, tuple)):
obj = [obj]
if obj == '' and not fieldtype[:2] in ['st','te','pa','up']:
return None
if obj != None:
if isinstance(obj, list) and not fieldtype.startswith('list'):
obj = [self.represent(o, fieldtype) for o in obj]
elif fieldtype in ('integer','id'):
obj = long(obj)
elif fieldtype == 'double':
obj = float(obj)
elif fieldtype.startswith('reference'):
if isinstance(obj, (Row, Reference)):
obj = obj['id']
obj = long(obj)
elif fieldtype == 'boolean':
if obj and not str(obj)[0].upper() == 'F':
obj = True
else:
obj = False
elif fieldtype == 'date':
if not isinstance(obj, datetime.date):
(y, m, d) = map(int,str(obj).strip().split('-'))
obj = datetime.date(y, m, d)
elif isinstance(obj,datetime.datetime):
(y, m, d) = (obj.year, obj.month, obj.day)
obj = datetime.date(y, m, d)
elif fieldtype == 'time':
if not isinstance(obj, datetime.time):
time_items = map(int,str(obj).strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
(h, mi, s) = time_items + [0]
obj = datetime.time(h, mi, s)
elif fieldtype == 'datetime':
if not isinstance(obj, datetime.datetime):
(y, m, d) = map(int,str(obj)[:10].strip().split('-'))
time_items = map(int,str(obj)[11:].strip().split(':')[:3])
while len(time_items)<3:
time_items.append(0)
(h, mi, s) = time_items
obj = datetime.datetime(y, m, d, h, mi, s)
elif fieldtype == 'blob':
pass
elif fieldtype.startswith('list:string'):
return map(self.to_unicode,obj)
elif fieldtype.startswith('list:'):
return map(int,obj)
else:
obj = self.to_unicode(obj)
return obj
def _insert(self,table,fields):
return 'insert %s in %s' % (fields, table)
def _count(self,query,distinct=None):
return 'count %s' % repr(query)
def _select(self,query,fields,attributes):
return 'select %s where %s' % (repr(fields), repr(query))
def _delete(self,tablename, query):
return 'delete %s where %s' % (repr(tablename),repr(query))
def _update(self,tablename,query,fields):
return 'update %s (%s) where %s' % (repr(tablename),
repr(fields),repr(query))
def commit(self):
"""
remember: no transactions on many NoSQL
"""
pass
def rollback(self):
"""
remember: no transactions on many NoSQL
"""
pass
def close(self):
"""
remember: no transactions on many NoSQL
"""
pass
# these functions should never be called!
def OR(self,first,second): raise SyntaxError, "Not supported"
def AND(self,first,second): raise SyntaxError, "Not supported"
def AS(self,first,second): raise SyntaxError, "Not supported"
def ON(self,first,second): raise SyntaxError, "Not supported"
def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported"
def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported"
def ADD(self,first,second): raise SyntaxError, "Not supported"
def SUB(self,first,second): raise SyntaxError, "Not supported"
def MUL(self,first,second): raise SyntaxError, "Not supported"
def DIV(self,first,second): raise SyntaxError, "Not supported"
def LOWER(self,first): raise SyntaxError, "Not supported"
def UPPER(self,first): raise SyntaxError, "Not supported"
def EXTRACT(self,first,what): raise SyntaxError, "Not supported"
def AGGREGATE(self,first,what): raise SyntaxError, "Not supported"
def LEFT_JOIN(self): raise SyntaxError, "Not supported"
def RANDOM(self): raise SyntaxError, "Not supported"
def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported"
def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported"
def LIKE(self,first,second): raise SyntaxError, "Not supported"
def drop(self,table,mode): raise SyntaxError, "Not supported"
def alias(self,table,alias): raise SyntaxError, "Not supported"
def migrate_table(self,*a,**b): raise SyntaxError, "Not supported"
def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported"
def prepare(self,key): raise SyntaxError, "Not supported"
def commit_prepared(self,key): raise SyntaxError, "Not supported"
def rollback_prepared(self,key): raise SyntaxError, "Not supported"
def concat_add(self,table): raise SyntaxError, "Not supported"
def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported"
def create_sequence_and_triggers(self, query, table, **args): pass
def log_execute(self,*a,**b): raise SyntaxError, "Not supported"
def execute(self,*a,**b): raise SyntaxError, "Not supported"
def represent_exceptions(self, obj, fieldtype): raise SyntaxError, "Not supported"
def lastrowid(self,table): raise SyntaxError, "Not supported"
def integrity_error_class(self): raise SyntaxError, "Not supported"
def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError, "Not supported"
class GAEF(object):
    """One Google-datastore filter: field name, operator and value, plus
    a python callable used to apply the predicate in-memory."""

    def __init__(self, name, op, value, apply):
        # The user-facing 'id' field is stored under the datastore key.
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
class GoogleDatastoreAdapter(NoSQLAdapter):
uploads_in_blob = True
types = {}
def file_exists(self, filename): pass
def file_open(self, filename, mode='rb', lock=True): pass
def file_close(self, fileobj, unlock=True): pass
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.types.update({
'boolean': gae.BooleanProperty,
'string': (lambda: gae.StringProperty(multiline=True)),
'text': gae.TextProperty,
'password': gae.StringProperty,
'blob': gae.BlobProperty,
'upload': gae.StringProperty,
'integer': gae.IntegerProperty,
'double': gae.FloatProperty,
'decimal': GAEDecimalProperty,
'date': gae.DateProperty,
'time': gae.TimeProperty,
'datetime': gae.DateTimeProperty,
'id': None,
'reference': gae.IntegerProperty,
'list:string': (lambda: gae.StringListProperty(default=None)),
'list:integer': (lambda: gae.ListProperty(int,default=None)),
'list:reference': (lambda: gae.ListProperty(int,default=None)),
})
self.db = db
self.uri = uri
self.dbengine = 'google:datastore'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self.pool_size = 0
match = re.compile('.*://(?P<namespace>.+)').match(uri)
if match:
namespace_manager.set_namespace(match.group('namespace'))
def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
myfields = {}
for k in table.fields:
if isinstance(polymodel,Table) and k in polymodel.fields():
continue
field = table[k]
attr = {}
if isinstance(field.type, SQLCustomType):
ftype = self.types[field.type.native or field.type.type](**attr)
elif isinstance(field.type, gae.Property):
ftype = field.type
elif field.type.startswith('id'):
continue
elif field.type.startswith('decimal'):
precision, scale = field.type[7:].strip('()').split(',')
precision = int(precision)
scale = int(scale)
ftype = GAEDecimalProperty(precision, scale, **attr)
elif field.type.startswith('reference'):
if field.notnull:
attr = dict(required=True)
referenced = field.type[10:].strip()
ftype = self.types[field.type[:9]](table._db[referenced])
elif field.type.startswith('list:reference'):
if field.notnull:
attr = dict(required=True)
referenced = field.type[15:].strip()
ftype = self.types[field.type[:14]](**attr)
elif field.type.startswith('list:'):
ftype = self.types[field.type](**attr)
elif not field.type in self.types\
or not self.types[field.type]:
raise SyntaxError, 'Field: unknown field type: %s' % field.type
else:
ftype = self.types[field.type](**attr)
myfields[field.name] = ftype
if not polymodel:
table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
elif polymodel==True:
table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
elif isinstance(polymodel,Table):
table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
else:
raise SyntaxError, "polymodel must be None, True, a table or a tablename"
return None
def expand(self,expression,field_type=None):
if isinstance(expression,Field):
if expression.type in ('text','blob'):
raise SyntaxError, 'AppEngine does not index by: %s' % expression.type
return expression.name
elif isinstance(expression, (Expression, Query)):
if not expression.second is None:
return expression.op(expression.first, expression.second)
elif not expression.first is None:
return expression.op(expression.first)
else:
return expression.op()
elif field_type:
return self.represent(expression,field_type)
elif isinstance(expression,(list,tuple)):
return ','.join([self.represent(item,field_type) for item in expression])
else:
return str(expression)
### TODO from gql.py Expression
def AND(self,first,second):
a = self.expand(first)
b = self.expand(second)
if b[0].name=='__key__' and a[0].name!='__key__':
return b+a
return a+b
def EQ(self,first,second=None):
if isinstance(second, Key):
return [GAEF(first.name,'=',second,lambda a,b:a==b)]
return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
def NE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
def LT(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'<',second,lambda a,b:a<b)]
def LE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
def GT(self,first,second=None):
if first.type != 'id' or second==0 or second == '0':
return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'>',second,lambda a,b:a>b)]
def GE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
else:
second = Key.from_path(first._tablename, long(second))
return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
def INVERT(self,first):
return '-%s' % first.name
def COMMA(self,first,second):
return '%s, %s' % (self.expand(first),self.expand(second))
def BELONGS(self,first,second=None):
if not isinstance(second,(list, tuple)):
raise SyntaxError, "Not supported"
if first.type != 'id':
return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)]
else:
second = [Key.from_path(first._tablename, i) for i in second]
return [GAEF(first.name,'in',second,lambda a,b:a in b)]
def CONTAINS(self,first,second):
if not first.type.startswith('list:'):
raise SyntaxError, "Not supported"
return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)]
def NOT(self,first):
nops = { self.EQ: self.NE,
self.NE: self.EQ,
self.LT: self.GE,
self.GT: self.LE,
self.LE: self.GT,
self.GE: self.LT}
if not isinstance(first,Query):
raise SyntaxError, "Not suported"
nop = nops.get(first.op,None)
if not nop:
raise SyntaxError, "Not suported %s" % first.op.__name__
first.op = nop
return self.expand(first)
def truncate(self,table,mode):
self.db(table._id > 0).delete()
def select_raw(self,query,fields=[],attributes={}):
new_fields = []
for item in fields:
if isinstance(item,SQLALL):
new_fields += item.table
else:
new_fields.append(item)
fields = new_fields
if query:
tablename = self.get_table(query)
elif fields:
tablename = fields[0].tablename
query = fields[0].table._id>0
else:
raise SyntaxError, "Unable to determine a tablename"
query = self.filter_tenant(query,[tablename])
tableobj = self.db[tablename]._tableobj
items = tableobj.all()
filters = self.expand(query)
for filter in filters:
if filter.name=='__key__' and filter.op=='>' and filter.value==0:
continue
elif filter.name=='__key__' and filter.op=='=':
if filter.value==0:
items = []
elif isinstance(filter.value, Key):
item = tableobj.get(filter.value)
items = (item and [item]) or []
else:
item = tableobj.get_by_id(filter.value)
items = (item and [item]) or []
elif isinstance(items,list): # i.e. there is a single record!
items = [i for i in items if filter.apply(getattr(item,filter.name),
filter.value)]
else:
if filter.name=='__key__': items.order('__key__')
items = items.filter('%s %s' % (filter.name,filter.op),filter.value)
if not isinstance(items,list):
if attributes.get('left', None):
raise SyntaxError, 'Set: no left join in appengine'
if attributes.get('groupby', None):
raise SyntaxError, 'Set: no groupby in appengine'
orderby = attributes.get('orderby', False)
if orderby:
### THIS REALLY NEEDS IMPROVEMENT !!!
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
if isinstance(orderby,Expression):
orderby = self.expand(orderby)
orders = orderby.split(', ')
for order in orders:
order={'-id':'-__key__','id':'__key__'}.get(order,order)
items = items.order(order)
if attributes.get('limitby', None):
(lmin, lmax) = attributes['limitby']
(limit, offset) = (lmax - lmin, lmin)
items = items.fetch(limit, offset=offset)
fields = self.db[tablename].fields
return (items, tablename, fields)
def select(self,query,fields,attributes):
(items, tablename, fields) = self.select_raw(query,fields,attributes)
# self.db['_lastsql'] = self._select(query,fields,attributes)
rows = [
[t=='id' and int(item.key().id()) or getattr(item, t) for t in fields]
for item in items]
colnames = ['%s.%s' % (tablename, t) for t in fields]
return self.parse(rows, colnames, False)
def count(self,query,distinct=None):
if distinct:
raise RuntimeError, "COUNT DISTINCT not supported"
(items, tablename, fields) = self.select_raw(query)
# self.db['_lastsql'] = self._count(query)
try:
return len(items)
except TypeError:
return items.count(limit=None)
def delete(self,tablename, query):
"""
This function was changed on 2010-05-04 because according to
http://code.google.com/p/googleappengine/issues/detail?id=3119
GAE no longer support deleting more than 1000 records.
"""
# self.db['_lastsql'] = self._delete(tablename,query)
(items, tablename, fields) = self.select_raw(query)
# items can be one item or a query
if not isinstance(items,list):
counter = items.count(limit=None)
leftitems = items.fetch(1000)
while len(leftitems):
gae.delete(leftitems)
leftitems = items.fetch(1000)
else:
counter = len(items)
gae.delete(items)
return counter
    def update(self,tablename,query,update_fields):
        """Update all matching entities, one datastore put per entity.

        ``update_fields`` is a sequence of (Field, new_value) pairs.
        Returns the number of records updated.
        """
        # self.db['_lastsql'] = self._update(tablename,query,update_fields)
        (items, tablename, fields) = self.select_raw(query)
        counter = 0
        for item in items:
            for field, value in update_fields:
                setattr(item, field.name, self.represent(value,field.type))
            item.put()
            counter += 1
        logger.info(str(counter))
        return counter
    def insert(self,table,fields):
        """Insert one record and return a lazy Reference to its new id.

        ``fields`` is a sequence of (Field, value) pairs.
        """
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        # wrap the datastore-assigned key id in a Reference so the full
        # record can be fetched lazily later
        rid = Reference(tmp.key().id())
        (rid._table, rid._record) = (table, None)
        return rid
def bulk_insert(self,table,items):
parsed_items = []
for item in items:
dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
parsed_items.append(table._tableobj(**dfields))
gae.put(parsed_items)
return True
def uuid2int(uuidv):
    """Convert a UUID string into its 128-bit integer value."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
def int2uuid(n):
    """Convert a 128-bit integer back into its canonical UUID string."""
    value = uuid.UUID(int=n)
    return str(value)
class CouchDBAdapter(NoSQLAdapter):
    """DAL adapter for CouchDB.

    Queries are compiled into CouchDB javascript map functions; row ids
    are client-generated 128-bit UUID integers stored as the document
    ``_id`` (as a decimal string).
    """
    uploads_in_blob = True
    # maps web2py field types to the python types used for representation
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }
    # no migration metadata files are kept for CouchDB
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj, unlock=True): pass
    def expand(self,expression,field_type=None):
        # an 'id' field maps to the CouchDB document _id
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)
    # boolean / comparison operators are emitted as javascript source
    def AND(self,first,second):
        return '(%s && %s)' % (self.expand(first),self.expand(second))
    def OR(self,first,second):
        return '(%s || %s)' % (self.expand(first),self.expand(second))
    def EQ(self,first,second):
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
    def NE(self,first,second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
    def COMMA(self,first,second):
        return '%s + %s' % (self.expand(first),self.expand(second))
    def represent(self, obj, fieldtype):
        """Return a javascript-source literal for ``obj``."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(int(value)))
        return repr(not isinstance(value,unicode) and value or value.encode('utf8'))
    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.uri = uri
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self.pool_size = pool_size
        # strip the 'couchdb://' scheme prefix and talk plain http
        url='http://'+uri[10:]
        def connect(url=url,driver_args=driver_args):
            return couchdb.Server(url,**driver_args)
        self.pool_connection(connect)
    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        # 'migration' simply makes sure the database exists server side
        if migrate:
            try:
                self.connection.create(table._tablename)
            except:
                pass
    def insert(self,table,fields):
        """Insert one document; the integer id is generated client side."""
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,NoSQLAdapter.represent(self,v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id
    def _select(self,query,fields,attributes):
        """Compile the query into a couchdb javascript map function.

        Returns ``(function_source, colnames)``.
        """
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        for key in set(attributes.keys())-set(('orderby','groupby','limitby',
                                               'required','cache','left',
                                               'distinct','having')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        def uid(fd):
            # the 'id' field is stored as the couchdb document _id
            return fd=='id' and '_id' or fd
        def get(row,fd):
            return fd=='id' and int(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames
    def select(self,query,fields,attributes):
        """Execute the compiled map function and parse results into rows."""
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        rows = [cols['value'] for cols in ctable.query(fn)]
        return self.parse(rows, colnames, False)
    def delete(self,tablename,query):
        """Delete matching documents; returns the number deleted."""
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: delete a single document by _id
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general case: select the matching ids, delete one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)
    def update(self,tablename,query,fields):
        """Update matching documents; returns the number updated."""
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        if query.first.type=='id' and query.op==self.EQ:
            # fast path: fetch/update a single document by _id
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = NoSQLAdapter.represent(self,value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general case: select matching ids, update one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = NoSQLAdapter.represent(self,value,table[key.name].type)
                ctable.save(doc)
            return len(rows)
    def count(self,query,distinct=None):
        """Count matching documents (COUNT DISTINCT is not supported)."""
        if distinct:
            raise RuntimeError, "COUNT DISTINCT not supported"
        if not isinstance(query,Query):
            raise SyntaxError, "Not Supported"
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        return len(rows)
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]

    Returns ``text`` unchanged when valid; raises SyntaxError for any
    other character.
    """
    # a single search is enough: we only need to know whether at least one
    # forbidden character exists, not collect all of them (findall)
    if re.search('[^0-9a-zA-Z_]', text):
        raise SyntaxError(
            'only [0-9a-zA-Z_] allowed in table and field names, received %s'
            % text)
    return text
class MongoDBAdapter(NoSQLAdapter):
uploads_in_blob = True
types = {
'boolean': bool,
'string': str,
'text': str,
'password': str,
'blob': str,
'upload': str,
'integer': long,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': long,
'reference': long,
'list:string': list,
'list:integer': list,
'list:reference': list,
}
def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=lambda x:x, driver_args={},
adapter_args={}):
self.db = db
self.uri = uri
self.dbengine = 'mongodb'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self.pool_size = pool_size
m = re.compile('^(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self._uri[10:])
if not m:
raise SyntaxError, "Invalid URI string in DAL: %s" % self._uri
host = m.group('host')
if not host:
raise SyntaxError, 'mongodb: host name required'
dbname = m.group('db')
if not dbname:
raise SyntaxError, 'mongodb: db name required'
port = m.group('port') or 27017
driver_args.update(dict(host=host,port=port))
def connect(dbname=dbname,driver_args=driver_args):
return pymongo.Connection(**driver_args)[dbname]
self.pool_connection(connect)
def insert(self,table,fields):
ctable = self.connection[table._tablename]
values = dict((k,self.represent(v,table[k].type)) for k,v in fields)
ctable.insert(values)
return uuid2int(id)
def count(self,query):
raise RuntimeError, "Not implemented"
def select(self,query,fields,attributes):
raise RuntimeError, "Not implemented"
def delete(self,tablename, query):
raise RuntimeError, "Not implemented"
def update(self,tablename,query,fields):
raise RuntimeError, "Not implemented"
########################################################################
# end of adapters
########################################################################
# Maps each supported connection-uri scheme to the adapter class that
# implements it; DAL.__init__ looks up the parsed scheme here.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.
    makes sure the content of a field is in line with the declared
    fieldtype

    Returns a validator, or a list of validators, for ``field``.
    """
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        # a custom type may carry its own validator; otherwise validate
        # based on its underlying native type
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    def ff(r,id):
        # format one referenced record using the referenced table's _format
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type == 'string':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'text':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'password':
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'double':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif field.db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in field.db.tables:
        referenced = field.db[field_type[10:]]
        def repr_ref(id, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(field.db,field)
            # a self-reference must be allowed to be empty
            if field.tablename == field_type[10:]:
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif field.db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in field.db.tables:
        referenced = field.db[field_type[15:]]
        def list_ref_repr(ids, r=referenced, f=ff):
            if not ids:
                return None
            refs = r._db(r._id.belongs(ids)).select(r._id)
            return (refs and ', '.join(str(f(r,ref.id)) for ref in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(field.db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(field.db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(field.db,field))
    # two-letter prefixes of types (integer, double, date, time, datetime,
    # decimal, boolean) whose widgets may legitimately submit empty input
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape '|' inside an item so it can live in a bar-encoded string."""
    text = str(item)
    return text.replace('|', '||')
def bar_encode(items):
    """Serialize items as a |-delimited string, escaping embedded bars.

    Items whose string form is blank are skipped.
    """
    encoded = [str(item).replace('|', '||')
               for item in items if str(item).strip()]
    return '|%s|' % '|'.join(encoded)
def bar_decode_integer(value):
    """Parse a |-delimited string of integers back into a list of ints."""
    result = []
    for chunk in value.split('|'):
        if chunk.strip():
            result.append(int(chunk))
    return result
def bar_decode_string(value):
    """Split a bar-encoded string payload back into its list of items.

    Relies on the module-level ``string_unpack`` pattern to split on
    unescaped bars; escaped bars ('||') are restored to '|'.
    """
    inner = value[1:-1]
    return [chunk.replace('||', '|')
            for chunk in string_unpack.split(inner) if chunk.strip()]
class Row(dict):
    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """
    def __getitem__(self, key):
        # keys are normalized to str; '_extra' holds extra/computed columns
        key=str(key)
        if key in self.get('_extra',{}):
            return self._extra[key]
        return dict.__getitem__(self, key)
    def __call__(self,key):
        # row('name') is equivalent to row['name']
        return self.__getitem__(key)
    def __setitem__(self, key, value):
        dict.__setitem__(self, str(key), value)
    def __getattr__(self, key):
        # NOTE: a missing attribute raises KeyError (not AttributeError)
        # because it delegates to __getitem__
        return self[key]
    def __setattr__(self, key, value):
        self[key] = value
    def __repr__(self):
        return '<Row ' + dict.__repr__(self) + '>'
    def __int__(self):
        # a row with an 'id' column casts to that id
        return dict.__getitem__(self,'id')
    def __eq__(self,other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False
    def __ne__(self,other):
        return not (self == other)
    def __copy__(self):
        return Row(dict(self))
    def as_dict(self,datetime_to_str=False):
        """Return a plain-dict copy containing only serializable values.

        Nested Rows become dicts, References become ints, Decimals become
        floats; datetimes are kept (or ISO-formatted when datetime_to_str
        is True); any other non-serializable value is dropped.
        """
        SERIALIZABLE_TYPES = (str,unicode,int,long,float,bool,list)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=int(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,SERIALIZABLE_TYPES):
                del d[k]
        return d
def Row_unpickler(data):
    # rebuild a Row from the pickled dict produced by Row_pickler
    return Row(cPickle.loads(data))
def Row_pickler(data):
    # pickle a Row via its serializable dict form (see Row.as_dict)
    return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),)
# register the custom pickling protocol for Row instances
copy_reg.pickle(Row, Row_pickler, Row_unpickler)
################################################################################
# Everything below should be independent on the specifics of the
# database and should for RDBMs and some NoSQL databases
################################################################################
class SQLCallableList(list):
    """A list (of table names) that, when called, returns a shallow copy."""
    def __call__(self):
        return copy.copy(self)
class DAL(dict):
"""
an instance of this class represents a database connection
Example::
db = DAL('sqlite://test.db')
db.define_table('tablename', Field('fieldname1'),
Field('fieldname2'))
"""
@staticmethod
def set_folder(folder):
"""
# ## this allows gluon to set a folder for this thread
# ## <<<<<<<<< Should go away as new DAL replaces old sql.py
"""
BaseAdapter.set_folder(folder)
@staticmethod
def distributed_transaction_begin(*instances):
if not instances:
return
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
instances = enumerate(instances)
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError, \
'distributed transaction not suported by %s' % db._dbname
for (i, db) in instances:
db._adapter.distributed_transaction_begin(keys[i])
@staticmethod
def distributed_transaction_commit(*instances):
if not instances:
return
instances = enumerate(instances)
thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread())
keys = ['%s.%i' % (thread_key, i) for (i,db) in instances]
for (i, db) in instances:
if not db._adapter.support_distributed_transaction():
raise SyntaxError, \
'distributed transaction not suported by %s' % db._dbanme
try:
for (i, db) in instances:
db._adapter.prepare(keys[i])
except:
for (i, db) in instances:
db._adapter.rollback_prepared(keys[i])
raise RuntimeError, 'failure to commit distributed transaction'
else:
for (i, db) in instances:
db._adapter.commit_prepared(keys[i])
return
def __init__(self, uri='sqlite://dummy.db', pool_size=0, folder=None,
db_codec='UTF-8', check_reserved=None,
migrate=True, fake_migrate=False,
migrate_enabled=True, fake_migrate_all=False,
decode_credentials=False, driver_args=None,
adapter_args={}, attempts=5, auto_import=False):
"""
Creates a new Database Abstraction Layer instance.
Keyword arguments:
:uri: string that contains information for connecting to a database.
(default: 'sqlite://dummy.db')
:pool_size: How many open connections to make to the database object.
:folder: <please update me>
:db_codec: string encoding of the database (default: 'UTF-8')
:check_reserved: list of adapters to check tablenames and column names
against sql reserved keywords. (Default None)
* 'common' List of sql keywords that are common to all database types
such as "SELECT, INSERT". (recommended)
* 'all' Checks against all known SQL keywords. (not recommended)
<adaptername> Checks against the specific adapters list of keywords
(recommended)
* '<adaptername>_nonreserved' Checks against the specific adapters
list of nonreserved keywords. (if available)
:migrate (defaults to True) sets default migrate behavior for all tables
:fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
:migrate_enabled (defaults to True). If set to False disables ALL migrations
:fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
:attempts (defaults to 5). Number of times to attempt connecting
"""
if not decode_credentials:
credential_decoder = lambda cred: cred
else:
credential_decoder = lambda cred: urllib.unquote(cred)
if folder:
self.set_folder(folder)
self._uri = uri
self._pool_size = pool_size
self._db_codec = db_codec
self._lastsql = ''
self._timings = []
self._pending_references = {}
self._request_tenant = 'request_tenant'
self._common_fields = []
if not str(attempts).isdigit() or attempts < 0:
attempts = 5
if uri:
uris = isinstance(uri,(list,tuple)) and uri or [uri]
error = ''
connected = False
for k in range(attempts):
for uri in uris:
try:
if is_jdbc and not uri.startswith('jdbc:'):
uri = 'jdbc:'+uri
self._dbname = regex_dbname.match(uri).group()
if not self._dbname in ADAPTERS:
raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname
# notice that driver args or {} else driver_args defaults to {} global, not correct
args = (self,uri,pool_size,folder,db_codec,credential_decoder,driver_args or {}, adapter_args)
self._adapter = ADAPTERS[self._dbname](*args)
connected = True
break
except SyntaxError:
raise
except Exception, error:
sys.stderr.write('DEBUG_c: Exception %r' % ((Exception, error,),))
if connected:
break
else:
time.sleep(1)
if not connected:
raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, error)
else:
args = (self,'None',0,folder,db_codec)
self._adapter = BaseAdapter(*args)
migrate = fake_migrate = False
adapter = self._adapter
self._uri_hash = hashlib.md5(adapter.uri).hexdigest()
self.tables = SQLCallableList()
self.check_reserved = check_reserved
if self.check_reserved:
from reserved_sql_keywords import ADAPTERS as RSK
self.RSK = RSK
self._migrate = migrate
self._fake_migrate = fake_migrate
self._migrate_enabled = migrate_enabled
self._fake_migrate_all = fake_migrate_all
if auto_import:
self.import_table_definitions(adapter.folder)
def import_table_definitions(self,path,migrate=False,fake_migrate=False):
pattern = os.path.join(path,self._uri_hash+'_*.table')
for filename in glob.glob(pattern):
tfile = self._adapter.file_open(filename, 'r')
try:
sql_fields = cPickle.load(tfile)
name = filename[len(pattern)-7:-6]
mf = [(value['sortable'],Field(key,type=value['type'])) \
for key, value in sql_fields.items()]
mf.sort(lambda a,b: cmp(a[0],b[0]))
self.define_table(name,*[item[1] for item in mf],
**dict(migrate=migrate,fake_migrate=fake_migrate))
finally:
self._adapter.file_close(tfile)
def check_reserved_keyword(self, name):
"""
Validates ``name`` against SQL keywords
Uses self.check_reserve which is a list of
operators to use.
self.check_reserved
['common', 'postgres', 'mysql']
self.check_reserved
['all']
"""
for backend in self.check_reserved:
if name.upper() in self.RSK[backend]:
raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper())
def __contains__(self, tablename):
if self.has_key(tablename):
return True
else:
return False
def parse_as_rest(self,patterns,args,vars,query=None,nested_select=True):
"""
EXAMPLE:
db.define_table('person',Field('name'),Field('info'))
db.define_table('pet',Field('person',db.person),Field('name'),Field('info'))
@request.restful()
def index():
def GET(*kargs,**kvars):
patterns = [
"/persons[person]",
"/{person.name.startswith}",
"/{person.name}/:field",
"/{person.name}/pets[pet.person]",
"/{person.name}/pet[pet.person]/{pet.name}",
"/{person.name}/pet[pet.person]/{pet.name}/:field"
]
parser = db.parse_as_rest(patterns,kargs,kvars)
if parser.status == 200:
return dict(content=parser.response)
else:
raise HTTP(parser.status,parser.error)
def POST(table_name,**kvars):
if table_name == 'person':
return db.person.validate_and_insert(**kvars)
elif table_name == 'pet':
return db.pet.validate_and_insert(**kvars)
else:
raise HTTP(400)
return locals()
"""
db = self
re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
re2 = re.compile('^.+\[.+\]$')
def auto_table(table,base='',depth=0):
patterns = []
for field in db[table].fields:
if base:
tag = '%s/%s' % (base,field.replace('_','-'))
else:
tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
f = db[table][field]
if not f.readable: continue
if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('boolean'):
tag += '/{%s.%s}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('double') or f.type.startswith('integer'):
tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type.startswith('list:'):
tag += '/{%s.%s.contains}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
elif f.type in ('date','datetime'):
tag+= '/{%s.%s.year}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.month}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.day}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if f.type in ('datetime','time'):
tag+= '/{%s.%s.hour}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.minute}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
tag+='/{%s.%s.second}' % (table,field)
patterns.append(tag)
patterns.append(tag+'/:field')
if depth>0:
for rtable,rfield in db[table]._referenced_by:
tag+='/%s[%s.%s]' % (rtable,rtable,rfield)
patterns.append(tag)
patterns += auto_table(rtable,base=tag,depth=depth-1)
return patterns
if patterns=='auto':
patterns=[]
for table in db.tables:
if not table.startswith('auth_'):
patterns += auto_table(table,base='',depth=1)
else:
i = 0
while i<len(patterns):
pattern = patterns[i]
tokens = pattern.split('/')
if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],'/'.join(tokens[:-1]))
patterns = patterns[:i]+new_patterns+patterns[i+1:]
i += len(new_patterns)
else:
i += 1
if '/'.join(args) == 'patterns':
return Row({'status':200,'pattern':'list',
'error':None,'response':patterns})
for pattern in patterns:
otable=table=None
dbset=db(query)
i=0
tags = pattern[1:].split('/')
# print pattern
if len(tags)!=len(args):
continue
for tag in tags:
# print i, tag, args[i]
if re1.match(tag):
# print 're1:'+tag
tokens = tag[1:-1].split('.')
table, field = tokens[0], tokens[1]
if not otable or table == otable:
if len(tokens)==2 or tokens[2]=='eq':
query = db[table][field]==args[i]
elif tokens[2]=='ne':
query = db[table][field]!=args[i]
elif tokens[2]=='lt':
query = db[table][field]<args[i]
elif tokens[2]=='gt':
query = db[table][field]>args[i]
elif tokens[2]=='ge':
query = db[table][field]>=args[i]
elif tokens[2]=='le':
query = db[table][field]<=args[i]
elif tokens[2]=='year':
query = db[table][field].year()==args[i]
elif tokens[2]=='month':
query = db[table][field].month()==args[i]
elif tokens[2]=='day':
query = db[table][field].day()==args[i]
elif tokens[2]=='hour':
query = db[table][field].hour()==args[i]
elif tokens[2]=='minute':
query = db[table][field].minutes()==args[i]
elif tokens[2]=='second':
query = db[table][field].seconds()==args[i]
elif tokens[2]=='startswith':
query = db[table][field].startswith(args[i])
elif tokens[2]=='contains':
query = db[table][field].contains(args[i])
else:
raise RuntimeError, "invalid pattern: %s" % pattern
if len(tokens)==4 and tokens[3]=='not':
query = ~query
elif len(tokens)>=4:
raise RuntimeError, "invalid pattern: %s" % pattern
dbset=dbset(query)
else:
raise RuntimeError, "missing relation in pattern: %s" % pattern
elif otable and re2.match(tag) and args[i]==tag[:tag.find('[')]:
# print 're2:'+tag
ref = tag[tag.find('[')+1:-1]
if '.' in ref:
table,field = ref.split('.')
# print table,field
if nested_select:
try:
dbset=db(db[table][field].belongs(dbset._select(db[otable]._id)))
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
else:
items = [item.id for item in dbset.select(db[otable]._id)]
dbset=db(db[table][field].belongs(items))
else:
dbset=dbset(db[ref])
elif tag==':field' and table:
# # print 're3:'+tag
field = args[i]
if not field in db[table]: break
try:
item = dbset.select(db[table][field],limitby=(0,1)).first()
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
if not item:
return Row({'status':404,'pattern':pattern,
'error':'record not found','response':None})
else:
return Row({'status':200,'response':item[field],
'pattern':pattern})
elif tag != args[i]:
break
otable = table
i += 1
if i==len(tags) and table:
otable,ofield = vars.get('order','%s.%s' % (table,field)).split('.',1)
try:
if otable[:1]=='~': orderby = ~db[otable[1:]][ofield]
else: orderby = db[otable][ofield]
except KeyError:
return Row({'status':400,'error':'invalid orderby','response':None})
fields = [field for field in db[table] if field.readable]
count = dbset.count()
try:
limits = (int(vars.get('min',0)),int(vars.get('max',1000)))
if limits[0]<0 or limits[1]<limits[0]: raise ValueError
except ValueError:
Row({'status':400,'error':'invalid limits','response':None})
if count > limits[1]-limits[0]:
Row({'status':400,'error':'too many records','response':None})
try:
response = dbset.select(limitby=limits,orderby=orderby,*fields)
except ValueError:
return Row({'status':400,'pattern':pattern,
'error':'invalid path','response':None})
return Row({'status':200,'response':response,'pattern':pattern})
return Row({'status':400,'error':'no matching pattern','response':None})
def define_table(
self,
tablename,
*fields,
**args
):
for key in args:
if key not in [
'migrate',
'primarykey',
'fake_migrate',
'format',
'trigger_name',
'sequence_name',
'polymodel']:
raise SyntaxError, 'invalid table "%s" attribute: %s' % (tablename, key)
migrate = self._migrate_enabled and args.get('migrate',self._migrate)
fake_migrate = self._fake_migrate_all or args.get('fake_migrate',self._fake_migrate)
format = args.get('format',None)
trigger_name = args.get('trigger_name', None)
sequence_name = args.get('sequence_name', None)
primarykey=args.get('primarykey',None)
polymodel=args.get('polymodel',None)
if not isinstance(tablename,str):
raise SyntaxError, "missing table name"
tablename = cleanup(tablename)
lowertablename = tablename.lower()
if tablename.startswith('_') or hasattr(self,lowertablename) or \
regex_python_keywords.match(tablename):
raise SyntaxError, 'invalid table name: %s' % tablename
elif lowertablename in self.tables:
raise SyntaxError, 'table already defined: %s' % tablename
elif self.check_reserved:
self.check_reserved_keyword(tablename)
if self._common_fields:
fields = [f for f in fields] + [f for f in self._common_fields]
t = self[tablename] = Table(self, tablename, *fields,
**dict(primarykey=primarykey,
trigger_name=trigger_name,
sequence_name=sequence_name))
# db magic
if self._uri in (None,'None'):
return t
t._create_references()
if migrate or self._adapter.dbengine=='google:datastore':
try:
sql_locker.acquire()
self._adapter.create_table(t,migrate=migrate,
fake_migrate=fake_migrate,
polymodel=polymodel)
finally:
sql_locker.release()
else:
t._dbt = None
self.tables.append(tablename)
t._format = format
return t
def __iter__(self):
for tablename in self.tables:
yield self[tablename]
def __getitem__(self, key):
return dict.__getitem__(self, str(key))
def __setitem__(self, key, value):
dict.__setitem__(self, str(key), value)
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key[:1]!='_' and key in self:
raise SyntaxError, \
'Object %s exists and cannot be redefined' % key
self[key] = value
def __repr__(self):
return '<DAL ' + dict.__repr__(self) + '>'
def __call__(self, query=None):
if isinstance(query,Table):
query = query._id>0
elif isinstance(query,Field):
query = query!=None
return Set(self, query)
def commit(self):
self._adapter.commit()
def rollback(self):
self._adapter.rollback()
def executesql(self, query, placeholders=None, as_dict=False):
"""
placeholders is optional and will always be None when using DAL
if using raw SQL with placeholders, placeholders may be
a sequence of values to be substituted in
or, *if supported by the DB driver*, a dictionary with keys
matching named placeholders in your SQL.
Added 2009-12-05 "as_dict" optional argument. Will always be
None when using DAL. If using raw SQL can be set to True
and the results cursor returned by the DB driver will be
converted to a sequence of dictionaries keyed with the db
field names. Tested with SQLite but should work with any database
since the cursor.description used to get field names is part of the
Python dbi 2.0 specs. Results returned with as_dict = True are
the same as those returned when applying .to_list() to a DAL query.
[{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]
--bmeredyk
"""
if placeholders:
self._adapter.execute(query, placeholders)
else:
self._adapter.execute(query)
if as_dict:
if not hasattr(self._adapter.cursor,'description'):
raise RuntimeError, "database does not support executesql(...,as_dict=True)"
# Non-DAL legacy db query, converts cursor results to dict.
# sequence of 7-item sequences. each sequence tells about a column.
# first item is always the field name according to Python Database API specs
columns = self._adapter.cursor.description
# reduce the column info down to just the field names
fields = [f[0] for f in columns]
# will hold our finished resultset in a list
data = self._adapter.cursor.fetchall()
# convert the list for each row into a dictionary so it's
# easier to work with. row['field_name'] rather than row[0]
return [dict(zip(fields,row)) for row in data]
# see if any results returned from database
try:
return self._adapter.cursor.fetchall()
except:
return None
def _update_referenced_by(self, other):
for tablename in self.tables:
by = self[tablename]._referenced_by
by[:] = [item for item in by if not item[0] == other]
def export_to_csv_file(self, ofile, *args, **kwargs):
for table in self.tables:
ofile.write('TABLE %s\r\n' % table)
self(self[table]._id > 0).select().export_to_csv_file(ofile, *args, **kwargs)
ofile.write('\r\n\r\n')
ofile.write('END')
def import_from_csv_file(self, ifile, id_map={}, null='<NULL>',
unique='uuid', *args, **kwargs):
for line in ifile:
line = line.strip()
if not line:
continue
elif line == 'END':
return
elif not line.startswith('TABLE ') or not line[6:] in self.tables:
raise SyntaxError, 'invalid file format'
else:
tablename = line[6:]
self[tablename].import_from_csv_file(ifile, id_map, null,
unique, *args, **kwargs)
class SQLALL(object):
    """
    Renders as a comma-separated list of all of a table's qualified field
    names ('table.field, table.field, ...'); used to expand 'SELECT *'.
    Normally only instantiated from within gluon.sql.
    """

    def __init__(self, table):
        self.table = table

    def __str__(self):
        names = [str(f) for f in self.table]
        return ', '.join(names)
class Reference(int):
    """An int subclass holding a foreign-key value that lazily loads the
    referenced record on first attribute/item access (recursive selects).

    NOTE(review): relies on `_table` and `_record` being assigned on the
    instance (via __setattr__) after construction, presumably by the
    adapter; if `_record` were never set, __getattr__('_record') would
    recurse — confirm against the caller.
    """
    def __allocate(self):
        # Fetch the referenced row once; cached in _record afterwards.
        if not self._record:
            self._record = self._table[int(self)]
        if not self._record:
            # the id points at a row that no longer exists
            raise RuntimeError, "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self))
    def __getattr__(self, key):
        # 'id' is the reference value itself: no db access needed
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)
    def __setattr__(self, key, value):
        # underscore names are internal bookkeeping stored on the instance;
        # everything else writes through into the referenced record
        if key.startswith('_'):
            int.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value
    def __getitem__(self, key):
        # mirror __getattr__ so row['field'] and row.field behave alike
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)
    def __setitem__(self,key,value):
        # mirror __setattr__ for item-style assignment
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    """Inverse of Reference_pickler: rebuild the int from marshalled bytes."""
    return marshal.loads(data)
def Reference_pickler(data):
    """Pickle reducer: represent a Reference as (unpickler, (marshalled_int,))."""
    value = int(data)
    try:
        payload = marshal.dumps(value)
    except AttributeError:
        # fallback: emulate marshal's 'i' (little-endian int32) format by hand
        payload = 'i%s' % struct.pack('<i', value)
    return (Reference_unpickler, (payload,))
# Register Reference with copy_reg so pickling stores only the marshalled
# int value and unpickling restores it via Reference_unpickler.
copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler)
class Table(dict):
    """
    an instance of this class represents a database table
    Example::
        db = DAL(...)
        db.define_table('users', Field('name'))
        db.users.insert(name='me') # print db.users._insert(...) to see SQL
        db.users.drop()
    """
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.
        Each table will have automatically an 'id'.
        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.
        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._tablename = tablename
        # sequence/trigger names default to the adapter's naming convention
        self._sequence_name = args.get('sequence_name',None) or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name',None) or \
            db and db._adapter.trigger_name(tablename)
        primarykey = args.get('primarykey', None)
        fieldnames,newfields=set(),[]
        if primarykey:
            # keyed table: the caller supplies the primary key field names
            if not isinstance(primarykey,list):
                raise SyntaxError, \
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename
            self._primarykey = primarykey
        elif not [f for f in fields if isinstance(f,Field) and f.type=='id']:
            # no explicit id field: create the implicit auto-increment 'id'
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        for field in fields:
            if not isinstance(field, (Field, Table)):
                raise SyntaxError, \
                    'define_table argument is not a Field or Table: %s' % field
            elif isinstance(field, Field) and not field.name in fieldnames:
                # copy a field already bound to some db so we can rebind it
                if hasattr(field, '_db'):
                    field = copy.copy(field)
                newfields.append(field)
                fieldnames.add(field.name)
                if field.type=='id':
                    self._id = field
            elif isinstance(field, Table):
                # a Table argument contributes its fields (except 'id')
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        newfields.append(copy.copy(field))
                        fieldnames.add(field.name)
            else:
                # let's ignore new fields with duplicated names!!!
                pass
        fields = newfields
        self._db = db
        tablename = tablename
        self.fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)
        # adapters that keep uploads in blobs get a companion '<name>_blob'
        if db and self._db._adapter.uploads_in_blob==True:
            for field in fields:
                if isinstance(field, Field) and field.type == 'upload'\
                        and field.uploadfield is True:
                    tmp = field.uploadfield = '%s_blob' % field.name
                    fields.append(self._db.Field(tmp, 'blob', default=''))
        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        for field in fields:
            if db and db.check_reserved:
                db.check_reserved_keyword(field.name)
            elif field.name in reserved:
                raise SyntaxError, "field name %s not allowed" % field.name
            # field names must be unique case-insensitively
            if field.name.lower() in lower_fieldnames:
                raise SyntaxError, "duplicate field %s in table %s" \
                    % (field.name, tablename)
            else:
                lower_fieldnames.add(field.name.lower())
            self.fields.append(field.name)
            self[field.name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db (both public and _ aliases)
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = self._db
            # clamp declared length to what the backend can store
            if self._db and field.type!='text' and \
                    self._db._adapter.maxcharlength < field.length:
                field.length = self._db._adapter.maxcharlength
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)
        self.ALL = SQLALL(self)
        if hasattr(self,'_primarykey'):
            for k in self._primarykey:
                if k not in self.fields:
                    raise SyntaxError, \
                        "primarykey must be a list of fields from table '%s " % tablename
                else:
                    # primary key components are implicitly NOT NULL
                    self[k].notnull = True
    def _validate(self,**vars):
        # run each value through its field's validators; return Row of errors
        errors = Row()
        for key,value in vars.items():
            value,error = self[key].validate(value)
            if error:
                errors[key] = error
        return errors
    def _create_references(self):
        # Resolve 'reference <table>[.<field>]' field types, wiring up
        # _referenced_by back-links; targets not yet defined are parked in
        # db._pending_references until their table is created.
        pr = self._db._pending_references
        self._referenced_by = []
        for fieldname in self.fields:
            field=self[fieldname]
            if isinstance(field.type,str) and field.type[:10] == 'reference ':
                ref = field.type[10:].strip()
                if not ref.split():
                    raise SyntaxError, 'Table: reference to nothing: %s' %ref
                refs = ref.split('.')
                rtablename = refs[0]
                if not rtablename in self._db:
                    # referenced table not defined yet: defer resolution
                    pr[rtablename] = pr.get(rtablename,[]) + [field]
                    continue
                rtable = self._db[rtablename]
                if len(refs)==2:
                    # reference to a specific field (keyed tables only)
                    rfieldname = refs[1]
                    if not hasattr(rtable,'_primarykey'):
                        raise SyntaxError,\
                            'keyed tables can only reference other keyed tables (for now)'
                    if rfieldname not in rtable.fields:
                        raise SyntaxError,\
                            "invalid field '%s' for referenced table '%s' in table '%s'" \
                            % (rfieldname, rtablename, self._tablename)
                rtable._referenced_by.append((self._tablename, field.name))
        for referee in pr.get(self._tablename,[]):
            # tables that referenced us before this table existed
            self._referenced_by.append((referee._tablename,referee.name))
    def _filter_fields(self, record, id=False):
        # keep only keys that are actual fields; drop 'id' unless id=True
        return dict([(k, v) for (k, v) in record.items() if k
                     in self.fields and (self[k].type!='id' or id)])
    def _build_query(self,key):
        """ for keyed table only """
        # AND together one equality per primary-key component
        query = None
        for k,v in key.iteritems():
            if k in self._primarykey:
                if query:
                    query = query & (self[k] == v)
                else:
                    query = (self[k] == v)
            else:
                raise SyntaxError, \
                'Field %s is not part of the primary key of %s' % \
                (k,self._tablename)
        return query
    def __getitem__(self, key):
        # table[key]: record fetch for dicts/ids, field lookup otherwise
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            rows = self._db(query).select()
            if rows:
                return rows[0]
            return None
        elif str(key).isdigit():
            # numeric key: fetch the record with that id
            return self._db(self._id == key).select(limitby=(0,1)).first()
        elif key:
            return dict.__getitem__(self, str(key))
    def __call__(self, key=DEFAULT, **kwargs):
        # table(id or query, **filters): one matching record or None
        if key!=DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(limitby=(0,1)).first()
            elif not str(key).isdigit():
                record = None
            else:
                record = self._db(self._id == key).select(limitby=(0,1)).first()
            if record:
                # all keyword filters must match the fetched record
                for k,v in kwargs.items():
                    if record[k]!=v: return None
            return record
        elif kwargs:
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.items()])
            return self._db(query).select(limitby=(0,1)).first()
        else:
            return None
    def __setitem__(self, key, value):
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; fall back to update on key collision
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError,\
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey)
        elif str(key).isdigit():
            # table[0] = {...} inserts; table[id] = {...} updates
            if key == 0:
                self.insert(**self._filter_fields(value))
            elif not self._db(self._id == key)\
                    .update(**self._filter_fields(value)):
                raise SyntaxError, 'No such record: %s' % key
        else:
            if isinstance(key, dict):
                raise SyntaxError,\
                    'value must be a dictionary: %s' % value
            dict.__setitem__(self, str(key), value)
    def __delitem__(self, key):
        if isinstance(key, dict):
            # keyed-table delete by primary-key dict
            query = self._build_query(key)
            if not self._db(query).delete():
                raise SyntaxError, 'No such record: %s' % key
        elif not str(key).isdigit() or not self._db(self._id == key).delete():
            raise SyntaxError, 'No such record: %s' % key
    def __getattr__(self, key):
        # attribute access falls through to field lookup (table.fieldname)
        return self[key]
    def __setattr__(self, key, value):
        # fields are immutable once defined
        if key in self:
            raise SyntaxError, 'Object exists and cannot be redefined: %s' % key
        self[key] = value
    def __iter__(self):
        # iterate over Field objects, in definition order
        for fieldname in self.fields:
            yield self[fieldname]
    def __repr__(self):
        return '<Table ' + dict.__repr__(self) + '>'
    def __str__(self):
        # honor 'AS' aliasing when the table was aliased via with_alias
        if self.get('_ot', None):
            return '%s AS %s' % (self._ot, self._tablename)
        return self._tablename
    def _drop(self, mode = ''):
        # SQL for DROP (without executing it)
        return self._db._adapter._drop(self, mode)
    def drop(self, mode = ''):
        return self._db._adapter.drop(self,mode)
    def _listify(self,fields,update=False):
        # Normalize a {name: value} dict into [(Field, value)] pairs,
        # filling in defaults (insert) or update values, computed fields,
        # and enforcing required fields on insert.
        new_fields = []
        new_fields_names = []
        for name in fields:
            if not name in self.fields:
                if name != 'id':
                    raise SyntaxError, 'Field %s does not belong to the table' % name
            else:
                new_fields.append((self[name],fields[name]))
                new_fields_names.append(name)
        for ofield in self:
            if not ofield.name in new_fields_names:
                if not update and ofield.default!=None:
                    new_fields.append((ofield,ofield.default))
                elif update and ofield.update!=None:
                    new_fields.append((ofield,ofield.update))
        for ofield in self:
            if not ofield.name in new_fields_names and ofield.compute:
                try:
                    new_fields.append((ofield,ofield.compute(Row(fields))))
                except KeyError:
                    pass
            if not update and ofield.required and not ofield.name in new_fields_names:
                raise SyntaxError,'Table: missing required field: %s' % ofield.name
        return new_fields
    def _insert(self, **fields):
        # SQL for INSERT (without executing it)
        return self._db._adapter._insert(self,self._listify(fields))
    def insert(self, **fields):
        return self._db._adapter.insert(self,self._listify(fields))
    def validate_and_insert(self,**fields):
        # validate first; insert only when there are no errors
        response = Row()
        response.errors = self._validate(**fields)
        if not response.errors:
            response.id = self.insert(**fields)
        else:
            response.id = None
        return response
    def update_or_insert(self, key=DEFAULT, **values):
        # update the matching record if any, else insert;
        # returns the new id on insert and None on update
        if key==DEFAULT:
            record = self(**values)
        else:
            record = self(key)
        if record:
            record.update_record(**values)
            newid = None
        else:
            newid = self.insert(**values)
        return newid
    def bulk_insert(self, items):
        """
        here items is a list of dictionaries
        """
        items = [self._listify(item) for item in items]
        return self._db._adapter.bulk_insert(self,items)
    def _truncate(self, mode = None):
        # SQL for TRUNCATE (without executing it)
        return self._db._adapter._truncate(self, mode)
    def truncate(self, mode = None):
        return self._db._adapter.truncate(self, mode)
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        *args, **kwargs
        ):
        """
        import records from csv file. Column headers must have same names as
        table fields. field 'id' is ignored. If column names read 'table.file'
        the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]
        def fix(field, value, id_map):
            # convert a csv cell back into a python value for `field`
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type=='integer':
                if not value.strip():
                    value = None
                else:
                    value = int(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith('list:reference'):
                # NOTE(review): [10:] leaves 'ence <table>' rather than the
                # table name ('list:reference ' is 15 chars, not 10) — this
                # looks like a bug; confirm and fix the slice length
                ref_table = field.type[10:].strip()
                value = [id_map[ref_table][int(v)] \
                         for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                # remap the referenced id; silently keep the raw value if
                # the referenced table/id was not imported
                try:
                    value = id_map[field.type[9:].strip()][value]
                except KeyError:
                    pass
            return (field.name, value)
        def is_id(colname):
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False
        for line in reader:
            if not line:
                break
            if not colnames:
                # first row is the header: strip optional 'table.' prefixes
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], []
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    else:
                        cols.append(i)
                    if colname == unique:
                        unique_idx = i
            else:
                items = [fix(self[colnames[i]], line[i], id_map) \
                         for i in cols if colnames[i] in self.fields]
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                if not unique or unique not in colnames:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                # remember old-id -> new-id so references can be remapped
                if id_map and cid != []:
                    id_map_self[line[cid]] = new_id
    def with_alias(self, alias):
        # return an aliased copy of this table (SQL 'AS')
        return self._db._adapter.alias(self,alias)
    def on(self, query):
        # JOIN ... ON helper: table.on(condition)
        return Expression(self._db,self._db._adapter.ON,self,query)
class Expression(object):
def __init__(
self,
db,
op,
first=None,
second=None,
type=None,
):
self.db = db
self.op = op
self.first = first
self.second = second
### self._tablename = first._tablename ## CHECK
if not type and first and hasattr(first,'type'):
self.type = first.type
else:
self.type = type
def sum(self):
return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type)
def max(self):
return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type)
def min(self):
return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type)
def len(self):
return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer')
def lower(self):
return Expression(self.db, self.db._adapter.LOWER, self, None, self.type)
def upper(self):
return Expression(self.db, self.db._adapter.UPPER, self, None, self.type)
def year(self):
return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer')
def month(self):
return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer')
def day(self):
return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 'integer')
def hour(self):
return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer')
def minutes(self):
return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer')
def coalesce_zero(self):
return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type)
def seconds(self):
return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer')
def __getslice__(self, start, stop):
if start < 0:
pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
else:
pos0 = start + 1
if stop < 0:
length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
elif stop == sys.maxint:
length = self.len()
else:
length = '(%s - %s)' % (stop + 1, pos0)
return Expression(self.db,self.db._adapter.SUBSTRING,
self, (pos0, length), self.type)
def __getitem__(self, i):
return self[i:i + 1]
def __str__(self):
return self.db._adapter.expand(self,self.type)
def __or__(self, other): # for use in sortby
return Expression(self.db,self.db._adapter.COMMA,self,other,self.type)
def __invert__(self):
if hasattr(self,'_op') and self.op == self.db._adapter.INVERT:
return self.first
return Expression(self.db,self.db._adapter.INVERT,self,type=self.type)
def __add__(self, other):
return Expression(self.db,self.db._adapter.ADD,self,other,self.type)
def __sub__(self, other):
if self.type == 'integer':
result_type = 'integer'
elif self.type in ['date','time','datetime','double']:
result_type = 'double'
else:
raise SyntaxError, "subtraction operation not supported for type"
return Expression(self.db,self.db._adapter.SUB,self,other,
result_type)
def __mul__(self, other):
return Expression(self.db,self.db._adapter.MUL,self,other,self.type)
def __div__(self, other):
return Expression(self.db,self.db._adapter.DIV,self,other,self.type)
def __mod__(self, other):
return Expression(self.db,self.db._adapter.MOD,self,other,self.type)
def __eq__(self, value):
return Query(self.db, self.db._adapter.EQ, self, value)
def __ne__(self, value):
return Query(self.db, self.db._adapter.NE, self, value)
def __lt__(self, value):
return Query(self.db, self.db._adapter.LT, self, value)
def __le__(self, value):
return Query(self.db, self.db._adapter.LE, self, value)
def __gt__(self, value):
return Query(self.db, self.db._adapter.GT, self, value)
def __ge__(self, value):
return Query(self.db, self.db._adapter.GE, self, value)
def like(self, value):
return Query(self.db, self.db._adapter.LIKE, self, value)
def belongs(self, value):
return Query(self.db, self.db._adapter.BELONGS, self, value)
def startswith(self, value):
if not self.type in ('string', 'text'):
raise SyntaxError, "startswith used with incompatible field type"
return Query(self.db, self.db._adapter.STARTSWITH, self, value)
def endswith(self, value):
if not self.type in ('string', 'text'):
raise SyntaxError, "endswith used with incompatible field type"
return Query(self.db, self.db._adapter.ENDSWITH, self, value)
def contains(self, value):
if not self.type in ('string', 'text') and not self.type.startswith('list:'):
raise SyntaxError, "contains used with incompatible field type"
return Query(self.db, self.db._adapter.CONTAINS, self, value)
def with_alias(self,alias):
return Expression(self.db,self.db._adapter.AS,self,alias,self.type)
# for use in both Query and sortby
class SQLCustomType(object):
    """
    allows defining of custom SQL types
    Example::
        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )
        db.define_table(
            'example',
            Field('value', type=decimal)
            )
    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(self, type='string', native=None, encoder=None,
                 decoder=None, validator=None, _class=None):
        identity = lambda x: x
        self.type = type
        self.native = native
        # encoder/decoder default to pass-through when not supplied
        self.encoder = encoder or identity
        self.decoder = decoder or identity
        self.validator = validator
        self._class = _class or type

    def startswith(self, dummy=None):
        # custom types never participate in STARTSWITH queries
        return False

    def __getslice__(self, a=0, b=100):
        return None

    def __getitem__(self, i):
        return None

    def __str__(self):
        return self._class
class Field(Expression):
    """
    an instance of this class represents a database field
    example::
        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            uploadfield=True, widget=None, label=None, comment=None,
            uploadfield=True, # True means store on disk,
                              # 'a_field_name' means store in this field in db
                              # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False # upload to separate directories by uuid_keys
                                 # first 2 character and tablename.fieldname
                                 # False - old behavior
                                 # True - put uploaded file in
                                 #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                 #        directory)
    to be used as argument of DAL.define_table
    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password
    strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql)
    fields should have a default or they will be required in SQLFORMs
    the requires argument is used to validate the field input in SQLFORMs
    """
    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_delete=None,
        ):
        # Expression attributes; db/table are bound later by Table.__init__
        self.db = None
        self.op = None
        self.first = None
        self.second = None
        if not isinstance(fieldname,str):
            raise SyntaxError, "missing field name"
        # NOTE(review): the ':' and '.' prefix branches are identical (both
        # clear readable and writable) — presumably one was meant to differ;
        # confirm the intended semantics
        if fieldname.startswith(':'):
            fieldname,readable,writable=fieldname[1:],False,False
        elif fieldname.startswith('.'):
            fieldname,readable,writable=fieldname[1:],False,False
        # 'name=value' shorthand sets the default
        if '=' in fieldname:
            fieldname,default = fieldname.split('=',1)
        self.name = fieldname = cleanup(fieldname)
        # reject names that would shadow Table attributes or python keywords
        if hasattr(Table,fieldname) or fieldname[0] == '_' or \
                regex_python_keywords.match(fieldname):
            raise SyntaxError, 'Field: invalid field name: %s' % fieldname
        # a Table passed as type means a reference to that table
        if isinstance(type, Table):
            type = 'reference ' + type._tablename
        self.type = type # 'string', 'integer'
        self.length = (length is None) and MAXCHARLENGTH or length
        if default==DEFAULT:
            self.default = update or None
        else:
            self.default = default
        self.required = required # is this field required
        self.ondelete = ondelete.upper() # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.widget = widget
        self.label = label or ' '.join(item.capitalize() for item in fieldname.split('_'))
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list types get a default comma-joined representation
        if not represent and type in ('list:integer','list:string'):
            represent=lambda x: ', '.join(str(y) for y in x or [])
        self.represent = represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_delete = custom_delete
        # NOTE(review): dead branch — self.label was already defaulted above,
        # so it can no longer be None here
        if self.label is None:
            self.label = ' '.join([x.capitalize() for x in
                        fieldname.split('_')])
        if requires is None:
            self.requires = []
        else:
            self.requires = requires
    def store(self, file, filename=None, path=None):
        # Persist an uploaded stream; returns the encoded new filename that
        # is stored in the db column (table.field.uuid.b16(origname).ext).
        if self.custom_store:
            return self.custom_store(file,filename,path)
        if not filename:
            filename = file.name
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = re.compile('\.(?P<e>\w{1,5})$').search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        newfilename = newfilename[:200] + '.' + extension
        if isinstance(self.uploadfield,Field):
            # file content goes into another table's blob field
            blob_uploadfield_name = self.uploadfield.uploadfield
            keys={self.uploadfield.name: newfilename,
                  blob_uploadfield_name: file.read()}
            self.uploadfield.table.insert(**keys)
        elif self.uploadfield == True:
            # file content goes on the filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = os.path.join(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError, "you must specify a Field(...,uploadfolder=...)"
            if self.uploadseparate:
                # shard files into <table>.<field>/<uuid_key[:2]> subfolders
                path = os.path.join(path,"%s.%s" % (self._tablename, self.name),uuid_key[:2])
            if not os.path.exists(path):
                os.makedirs(path)
            pathfilename = os.path.join(path, newfilename)
            dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            finally:
                dest_file.close()
        return newfilename
    def retrieve(self, name, path=None):
        # Return (original_filename, open_stream) for stored upload `name`.
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self.uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
        if self.authorize and not self.authorize(row):
            raise http.HTTP(403)
        try:
            # decode the original filename embedded in the stored name
            m = regex_content.match(name)
            if not m or not self.isattachment:
                raise TypeError, 'Can\'t retrieve %s' % name
            filename = base64.b16decode(m.group('name'), True)
            filename = regex_cleanup_fn.sub('_', filename)
        except (TypeError, AttributeError):
            filename = name
        if isinstance(self.uploadfield, str): # ## if file is in DB
            return (filename, cStringIO.StringIO(row[self.uploadfield] or ''))
        elif isinstance(self.uploadfield,Field):
            # content lives in another table's blob field
            blob_uploadfield_name = self.uploadfield.uploadfield
            query = self.uploadfield == name
            data = self.uploadfield.table(query)[blob_uploadfield_name]
            return (filename, cStringIO.StringIO(data))
        else:
            # ## if file is on filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            else:
                path = os.path.join(self.db._adapter.folder, '..', 'uploads')
            if self.uploadseparate:
                # NOTE(review): uses `m` from the try above — if the regex
                # did not match, this raises AttributeError; confirm inputs
                t = m.group('table')
                f = m.group('field')
                u = m.group('uuidkey')
                path = os.path.join(path,"%s.%s" % (t,f),u[:2])
            return (filename, open(os.path.join(path, name), 'rb'))
    def formatter(self, value):
        # apply validators' formatters (in reverse order) for display
        if value is None or not self.requires:
            return value
        if not isinstance(self.requires, (list, tuple)):
            requires = [self.requires]
        elif isinstance(self.requires, tuple):
            requires = list(self.requires)
        else:
            requires = copy.copy(self.requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value
    def validate(self, value):
        # run all validators in order; stop at the first error
        if not self.requires:
            return (value, None)
        requires = self.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            (value, error) = validator(value)
            if error:
                return (value, error)
        return (value, None)
    def count(self):
        return Expression(self.db, self.db._adapter.AGGREGATE, self, 'COUNT', 'integer')
    def __nonzero__(self):
        # a Field is always truthy (unlike the queries it builds)
        return True
    def __str__(self):
        try:
            return '%s.%s' % (self.tablename, self.name)
        except:
            return '<no table>.%s' % self.name
class Query(object):
    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set
    Example::
        query = db.users.name=='Max'
        set = db(query)
        records = set.select()
    """

    def __init__(self, db, op, first=None, second=None):
        self.db, self.op = db, op
        self.first, self.second = first, second

    def __str__(self):
        # SQL generation is delegated entirely to the adapter
        return self.db._adapter.expand(self)

    def __and__(self, other):
        return Query(self.db, self.db._adapter.AND, self, other)

    def __or__(self, other):
        return Query(self.db, self.db._adapter.OR, self, other)

    def __invert__(self):
        # double negation collapses back to the inner query
        if self.op == self.db._adapter.NOT:
            return self.first
        return Query(self.db, self.db._adapter.NOT, self)
# matches single-quoted SQL string literals (no embedded quotes)
regex_quotes = re.compile("'[^']*'")
def xorify(orderby):
    """Fold a sequence of orderby expressions into one with `|`.

    Returns None for an empty or falsy input; with a single element the
    element itself is returned unchanged.
    """
    if not orderby:
        return None
    combined = orderby[0]
    for expr in orderby[1:]:
        combined = combined | expr
    return combined
class Set(object):
    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))
    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """
    def __init__(self, db, query):
        self.db = db
        self._db = db # for backward compatibility
        self.query = query
    def __call__(self, query):
        # refine this set with an additional condition (AND);
        # a Table means "all its records", a Field means "field is not null"
        if isinstance(query,Table):
            query = query._id>0
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query)
        else:
            return Set(self.db, query)
    def _count(self,distinct=None):
        # SQL for COUNT (without executing it)
        return self.db._adapter._count(self.query,distinct)
    def _select(self, *fields, **attributes):
        # SQL for SELECT (without executing it)
        return self.db._adapter._select(self.query,fields,attributes)
    def _delete(self):
        # SQL for DELETE (without executing it)
        tablename=self.db._adapter.get_table(self.query)
        return self.db._adapter._delete(tablename,self.query)
    def _update(self, **update_fields):
        # SQL for UPDATE (without executing it)
        tablename = self.db._adapter.get_table(self.query)
        fields = self.db[tablename]._listify(update_fields,update=True)
        return self.db._adapter._update(tablename,self.query,fields)
    def isempty(self):
        return not self.select(limitby=(0,1))
    def count(self,distinct=None):
        return self.db._adapter.count(self.query,distinct)
    def select(self, *fields, **attributes):
        return self.db._adapter.select(self.query,fields,attributes)
    def delete(self):
        # autodelete upload files before removing the rows themselves
        tablename=self.db._adapter.get_table(self.query)
        self.delete_uploaded_files()
        return self.db._adapter.delete(tablename,self.query)
    def update(self, **update_fields):
        tablename = self.db._adapter.get_table(self.query)
        fields = self.db[tablename]._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError, "No fields to update"
        self.delete_uploaded_files(update_fields)
        return self.db._adapter.update(tablename,self.query,fields)
    def validate_and_update(self, **update_fields):
        # like update(), but runs field validators first; on errors the
        # update is skipped and response.updated is None
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = self.db[tablename]._validate(**update_fields)
        fields = self.db[tablename]._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError, "No fields to update"
        self.delete_uploaded_files(update_fields)
        if not response.errors:
            response.updated = self.db._adapter.update(tablename,self.query,fields)
        else:
            response.updated = None
        return response
    def delete_uploaded_files(self, upload_fields=None):
        # Remove filesystem files belonging to autodelete upload fields of
        # records about to be deleted (or overwritten by an update).
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                   and table[f].uploadfield == True
                   and table[f].autodelete]
        if not fields:
            return
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    # value unchanged by the update: keep the file
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = os.path.join(self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # mirror the sharded layout used by Field.store
                        items = oldname.split('.')
                        uploadfolder = os.path.join(uploadfolder,
                                                    "%s.%s" % (items[0], items[1]),
                                                    items[2][:2])
                    oldpath = os.path.join(uploadfolder, oldname)
                    if os.path.exists(oldpath):
                        os.unlink(oldpath)
def update_record(pack, a=None):
    """Persist changed values of a selected row back to the database.

    :param pack: (colset, table, id) triple identifying the in-memory column
        set, its table, and the record id
    :param a: optional dict of new values; when falsy (None or empty, which
        the old `a={}` mutable default also was) the current colset values
        are written back instead. The mutable default was replaced with None
        to avoid the shared-default pitfall; `a or ...` semantics are
        unchanged.

    Non-field keys and the 'id' field are filtered out before the UPDATE;
    on success the written values are copied back into colset.
    """
    (colset, table, id) = pack
    b = a or dict(colset)
    # keep only real, non-id fields
    c = dict([(k,v) for (k,v) in b.items() if k in table.fields and table[k].type!='id'])
    table._db(table._id==id).update(**c)
    for (k, v) in c.items():
        colset[k] = v
class Rows(object):
"""
A wrapper for the return value of a select. It basically represents a table.
It has an iterator and each row is represented as a dictionary.
"""
# ## TODO: this class still needs some work to care for ID/OID
def __init__(
self,
db=None,
records=[],
colnames=[],
compact=True,
rawrows=None
):
self.db = db
self.records = records
self.colnames = colnames
self.compact = compact
self.response = rawrows
def setvirtualfields(self,**keyed_virtualfields):
if not keyed_virtualfields:
return self
for row in self.records:
for (tablename,virtualfields) in keyed_virtualfields.items():
attributes = dir(virtualfields)
virtualfields.__dict__.update(row)
if not tablename in row:
box = row[tablename] = Row()
else:
box = row[tablename]
for attribute in attributes:
if attribute[0] != '_':
method = getattr(virtualfields,attribute)
if hasattr(method,'im_func') and method.im_func.func_code.co_argcount:
box[attribute]=method()
return self
def __and__(self,other):
if self.colnames!=other.colnames: raise Exception, 'Cannot & incompatible Rows objects'
records = self.records+other.records
return Rows(self.db,records,self.colnames)
def __or__(self,other):
if self.colnames!=other.colnames: raise Exception, 'Cannot | incompatible Rows objects'
records = self.records
records += [record for record in other.records \
if not record in records]
return Rows(self.db,records,self.colnames)
def __nonzero__(self):
if len(self.records):
return 1
return 0
def __len__(self):
return len(self.records)
def __getslice__(self, a, b):
return Rows(self.db,self.records[a:b],self.colnames)
def __getitem__(self, i):
row = self.records[i]
keys = row.keys()
if self.compact and len(keys) == 1 and keys[0] != '_extra':
return row[row.keys()[0]]
return row
def __iter__(self):
"""
iterator over records
"""
for i in xrange(len(self)):
yield self[i]
def __str__(self):
"""
serializes the table into a csv file
"""
s = cStringIO.StringIO()
self.export_to_csv_file(s)
return s.getvalue()
def first(self):
if not self.records:
return None
return self[0]
def last(self):
if not self.records:
return None
return self[-1]
def find(self,f):
"""
returns a new Rows object, a subset of the original object,
filtered by the function f
"""
if not self.records:
return Rows(self.db, [], self.colnames)
records = []
for i in range(0,len(self)):
row = self[i]
if f(row):
records.append(self.records[i])
return Rows(self.db, records, self.colnames)
    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the function f,
        and returns a new Rows object containing the removed elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        i=0
        # in-place deletion: the index only advances when the current
        # record is kept, since del shifts the remaining records left
        while i<len(self):
            row = self[i]
            if f(row):
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)
def sort(self, f, reverse=False):
"""
returns a list of sorted elements (not sorted in place)
"""
return Rows(self.db,sorted(self,key=f,reverse=reverse),self.colnames)
def as_list(self,
compact=True,
storage_to_dict=True,
datetime_to_str=True):
"""
returns the data as a list or dictionary.
:param storage_to_dict: when True returns a dict, otherwise a list(default True)
:param datetime_to_str: convert datetime fields as strings (default True)
"""
(oc, self.compact) = (self.compact, compact)
if storage_to_dict:
items = [item.as_dict(datetime_to_str) for item in self]
else:
items = [item for item in self]
self.compact = compact
return items
def as_dict(self,
key='id',
compact=True,
storage_to_dict=True,
datetime_to_str=True):
"""
returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)
:param key: the name of the field to be used as dict key, normally the id
:param compact: ? (default True)
:param storage_to_dict: when True returns a dict, otherwise a list(default True)
:param datetime_to_str: convert datetime fields as strings (default True)
"""
rows = self.as_list(compact, storage_to_dict, datetime_to_str)
if isinstance(key,str) and key.count('.')==1:
(table, field) = key.split('.')
return dict([(r[table][field],r) for r in rows])
elif isinstance(key,str):
return dict([(r[key],r) for r in rows])
else:
return dict([(key(r),r) for r in rows])
    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)

        This will only work when exporting rows objects!!!!
        DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        # a proper csv starting with the column names
        writer.writerow(colnames)
        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:

            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                # references export as their integer id
                return int(value)
            elif hasattr(value, 'isoformat'):
                # date/datetime: 'YYYY-MM-DD hh:mm:ss'
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value
        for record in self:
            row = []
            for col in colnames:
                if not table_field.match(col):
                    # not a 'table.field' name: value lives in _extra
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and value!=None:
                        # blobs are base64-encoded so they survive csv
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)
def xml(self):
"""
serializes the table using sqlhtml.SQLTABLE (if present)
"""
import sqlhtml
return sqlhtml.SQLTABLE(self).xml()
    def json(self, mode='object', default=None):
        """
        serializes the table to a JSON list of objects

        :param mode: 'object' emits a list of dicts keyed by field name,
            'array' emits a list of value lists
        :param default: custom serializer passed through to the gluon
            serializers when available
        """
        mode = mode.lower()
        if not mode in ['object', 'array']:
            raise SyntaxError, 'Invalid JSON serialization mode: %s' % mode
        def inner_loop(record, col):
            # extract the value of column col ('table.field') from record
            (t, f) = col.split('.')
            res = None
            if not table_field.match(col):
                res = record._extra[col]
            else:
                if isinstance(record.get(t, None), Row):
                    res = record[t][f]
                else:
                    res = record[f]
            if mode == 'object':
                return (f, res)
            else:
                return res
        if mode == 'object':
            items = [dict([inner_loop(record, col) for col in
                    self.colnames]) for record in self]
        else:
            items = [[inner_loop(record, col) for col in self.colnames]
                    for record in self]
        # prefer the gluon serializers when available; fall back on
        # simplejson otherwise
        if have_serializers:
            return serializers.json(items,default=default or serializers.custom_json)
        else:
            import simplejson
            return simplejson.dumps(items)
def Rows_unpickler(data):
    """Rebuild a pickled Rows payload (a pickled list of row dicts)."""
    return cPickle.loads(data)
def Rows_pickler(data):
    """Reduce a Rows object to (unpickler, (pickled list of dicts,))."""
    return Rows_unpickler, \
        (cPickle.dumps(data.as_list(storage_to_dict=True,
                                    datetime_to_str=False)),)
# register the custom reducer so Rows instances can be pickled/copied
copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler)
################################################################################
# dummy function used to define some doctests
################################################################################
# dummy function: its docstring hosts the DAL doctests run by
# doctest.testmod() at the bottom of this file
def test_all():
    """
    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)
    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)
    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
            migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
            migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
            Field('author_id', db.author),\
            Field('paper_id', db.paper),\
            migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber.id>0).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
################################################################################
# deprecated since the new DAL; here only for backward compatibility
################################################################################

# legacy names from the old gluon.sql API, aliased to the new DAL classes
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = Row
SQLDB = DAL
GQLDB = DAL
DAL.Field = Field  # was necessary in gluon/globals.py session.connect
DAL.Table = Table  # was necessary in gluon/globals.py session.connect
################################################################################
# run tests
################################################################################

if __name__ == '__main__':
    import doctest
    doctest.testmod()  # executes the doctests hosted in test_all.__doc__
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import __builtin__
import os
import re
import sys
import threading
# Install the new import function:
def custom_import_install(web2py_path):
    """
    Install the web2py-aware importer as the builtin __import__.
    Idempotent: a second call is a no-op.

    @param web2py_path: the absolute path of the web2py directory
    """
    global _web2py_importer
    global _web2py_path
    if _web2py_importer:
        return # Already installed
    _web2py_path = web2py_path
    _web2py_importer = _Web2pyImporter(web2py_path)
    __builtin__.__import__ = _web2py_importer
def is_tracking_changes():
    """
    Report the current tracking mode.

    @return: True when neo_importer reloads Python modules whose source
    files changed; False when modules are never reloaded.
    """
    # reading a module global needs no 'global' declaration
    return _is_tracking_changes
def track_changes(track=True):
    """
    Tell neo_importer to start/stop tracking changes made to Python modules.
    @param track: True: Start tracking changes. False: Stop tracking changes.
    """
    global _is_tracking_changes
    global _web2py_importer
    global _web2py_date_tracker_importer
    assert track is True or track is False, "Boolean expected."
    if track == _is_tracking_changes:
        return  # already in the requested mode
    if track:
        # lazily create the date-tracking importer on first activation
        if not _web2py_date_tracker_importer:
            _web2py_date_tracker_importer = \
                _Web2pyDateTrackerImporter(_web2py_path)
        __builtin__.__import__ = _web2py_date_tracker_importer
    else:
        # fall back to the plain web2py importer
        __builtin__.__import__ = _web2py_importer
    _is_tracking_changes = track
# Module-level state shared by custom_import_install/track_changes above:
_STANDARD_PYTHON_IMPORTER = __builtin__.__import__  # Keep standard importer
_web2py_importer = None  # The standard web2py importer
_web2py_date_tracker_importer = None  # The web2py importer with date tracking
_web2py_path = None  # Absolute path of the web2py directory
_is_tracking_changes = False  # The tracking mode
class _BaseImporter(object):
    """
    The base importer. Dispatches the import call to the standard Python
    importer.
    """

    def begin(self):
        """
        Many imports can be made for a single import statement. This method
        helps the management of this aspect.
        """

    def __call__(self, name, globals={}, locals={}, fromlist=[], level=-1):
        """
        The import method itself.
        Mirrors the builtin __import__ signature; the mutable defaults are
        only passed through, never modified here.
        """
        return _STANDARD_PYTHON_IMPORTER(name, globals, locals, fromlist,
                level)

    def end(self):
        """
        Needed for clean up.
        """
class _DateTrackerImporter(_BaseImporter):
    """
    An importer tracking the date of the module files and reloading them when
    they have changed.
    """

    # suffix identifying a package entry file (e.g. "/__init__.py")
    _PACKAGE_PATH_SUFFIX = os.path.sep+"__init__.py"

    def __init__(self):
        super(_DateTrackerImporter, self).__init__()
        self._import_dates = {}  # Import dates of the files of the modules
        # Avoid reloading cause by file modifications of reload:
        self._tl = threading.local()
        self._tl._modules_loaded = None

    def begin(self):
        # per-thread set of modules already reloaded during this statement
        self._tl._modules_loaded = set()

    def __call__(self, name, globals={}, locals={}, fromlist=[], level=-1):
        """
        The import method itself.
        """
        # only the outermost import of a nested chain runs begin/end
        call_begin_end = self._tl._modules_loaded == None
        if call_begin_end:
            self.begin()
        try:
            # stash the caller context for possible re-imports below
            self._tl.globals = globals
            self._tl.locals = locals
            self._tl.level = level
            # Check the date and reload if needed:
            self._update_dates(name, fromlist)
            # Try to load the module and update the dates if it works:
            result = super(_DateTrackerImporter, self) \
                .__call__(name, globals, locals, fromlist, level)
            # Module maybe loaded for the 1st time so we need to set the date
            self._update_dates(name, fromlist)
            return result
        except Exception, e:
            raise e  # Don't hide something that went wrong
        finally:
            if call_begin_end:
                self.end()

    def _update_dates(self, name, fromlist):
        """
        Update all the dates associated to the statement import. A single
        import statement may import many modules.
        """
        self._reload_check(name)
        if fromlist:
            for fromlist_name in fromlist:
                self._reload_check("%s.%s" % (name, fromlist_name))

    def _reload_check(self, name):
        """
        Update the date associated to the module and reload the module if
        the file has changed.
        """
        module = sys.modules.get(name)
        file = self._get_module_file(module)
        if file:
            date = self._import_dates.get(file)
            new_date = None
            reload_mod = False
            mod_to_pack = False  # Module turning into a package? (special case)
            try:
                new_date = os.path.getmtime(file)
            except:
                self._import_dates.pop(file, None)  # Clean up
                # Handle module changing in package and
                # package changing in module:
                if file.endswith(".py"):
                    # Get path without file ext:
                    file = os.path.splitext(file)[0]
                    reload_mod = os.path.isdir(file) \
                        and os.path.isfile(file+self._PACKAGE_PATH_SUFFIX)
                    mod_to_pack = reload_mod
                else:  # Package turning into module?
                    file += ".py"
                    reload_mod = os.path.isfile(file)
                if reload_mod:
                    new_date = os.path.getmtime(file)  # Refresh file date
            if reload_mod or not date or new_date > date:
                self._import_dates[file] = new_date
            if reload_mod or (date and new_date > date):
                # source changed: reload at most once per import statement
                if module not in self._tl._modules_loaded:
                    if mod_to_pack:
                        # Module turning into a package:
                        mod_name = module.__name__
                        del sys.modules[mod_name]  # Delete the module
                        # Reload the module:
                        super(_DateTrackerImporter, self).__call__ \
                            (mod_name, self._tl.globals, self._tl.locals, [],
                             self._tl.level)
                    else:
                        reload(module)
                    self._tl._modules_loaded.add(module)

    def end(self):
        # reset per-thread state once the outermost import completes
        self._tl._modules_loaded = None

    @classmethod
    def _get_module_file(cls, module):
        """
        Get the absolute path file associated to the module or None.
        """
        file = getattr(module, "__file__", None)
        if file:
            # Make path absolute if not:
            #file = os.path.join(cls.web2py_path, file)
            file = os.path.splitext(file)[0]+".py"  # Change .pyc for .py
            if file.endswith(cls._PACKAGE_PATH_SUFFIX):
                file = os.path.dirname(file)  # Track dir for packages
            return file
class _Web2pyImporter(_BaseImporter):
    """
    The standard web2py importer. Like the standard Python importer but it
    tries to transform import statements as something like
    "import applications.app_name.modules.x". If the import failed, fall back
    on _BaseImporter.
    """

    _RE_ESCAPED_PATH_SEP = re.escape(os.path.sep)  # os.path.sep escaped for re

    def __init__(self, web2py_path):
        """
        @param web2py_path: The absolute path of the web2py installation.
        """
        global DEBUG
        super(_Web2pyImporter, self).__init__()
        self.web2py_path = web2py_path
        self.__web2py_path_os_path_sep = self.web2py_path+os.path.sep
        self.__web2py_path_os_path_sep_len = len(self.__web2py_path_os_path_sep)
        # matches relative paths like "applications<sep><app><sep>..."
        # capturing "applications<sep><app>" in group 1
        self.__RE_APP_DIR = re.compile(
            self._RE_ESCAPED_PATH_SEP.join( \
            ( \
                #"^" + re.escape(web2py_path), # Not working with Python 2.5
                "^(" + "applications",
                "[^",
                "]+)",
                "",
            ) ))

    def _matchAppDir(self, file_path):
        """
        Does the file in a directory inside the "applications" directory?
        """
        if file_path.startswith(self.__web2py_path_os_path_sep):
            file_path = file_path[self.__web2py_path_os_path_sep_len:]
            return self.__RE_APP_DIR.match(file_path)
        return False

    def __call__(self, name, globals={}, locals={}, fromlist=[], level=-1):
        """
        The import method itself.
        """
        self.begin()
        #try:
        # if not relative and not from applications:
        if not name.startswith(".") and level <= 0 \
                and not name.startswith("applications.") \
                and isinstance(globals, dict):
            # Get the name of the file do the import
            caller_file_name = os.path.join(self.web2py_path, \
                globals.get("__file__", ""))
            # Is the path in an application directory?
            match_app_dir = self._matchAppDir(caller_file_name)
            if match_app_dir:
                try:
                    # Get the prefix to add for the import
                    # (like applications.app_name.modules):
                    modules_prefix = \
                        ".".join((match_app_dir.group(1). \
                        replace(os.path.sep, "."), "modules"))
                    if not fromlist:
                        # import like "import x" or "import x.y"
                        return self.__import__dot(modules_prefix, name,
                                globals, locals, fromlist, level)
                    else:
                        # import like "from x import a, b, ..."
                        return super(_Web2pyImporter, self) \
                            .__call__(modules_prefix+"."+name,
                                globals, locals, fromlist, level)
                except ImportError:
                    pass
        return super(_Web2pyImporter, self).__call__(name, globals, locals,
                fromlist, level)
        #except Exception, e:
        #    raise e # Don't hide something that went wrong
        #finally:
        # NOTE(review): the call below is unreachable -- every path above
        # returns first; presumably left over from the commented-out
        # try/finally. Confirm before relying on begin/end pairing.
        self.end()

    def __import__dot(self, prefix, name, globals, locals, fromlist,
            level):
        """
        Here we will import x.y.z as many imports like:
        from applications.app_name.modules import x
        from applications.app_name.modules.x import y
        from applications.app_name.modules.x.y import z.
        x will be the module returned.
        """
        result = None
        for name in name.split("."):
            new_mod = super(_Web2pyImporter, self).__call__(prefix, globals,
                    locals, [name], level)
            try:
                # keep the first component only: "import x.y" binds x
                result = result or new_mod.__dict__[name]
            except KeyError:
                raise ImportError()
            prefix += "." + name
        return result
class _Web2pyDateTrackerImporter(_Web2pyImporter, _DateTrackerImporter):
    """
    Like _Web2pyImporter but using a _DateTrackerImporter: through the MRO,
    the super() calls inside _Web2pyImporter.__call__ dispatch to
    _DateTrackerImporter, so web2py-resolved imports are also date-checked.
    """
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Basic caching classes and methods
=================================
- Cache - The generic caching object interfacing with the others
- CacheInRam - providing caching in ram
- CacheInDisk - provides caches on disk
Memcache is also available via a different module (see gluon.contrib.memcache)
When web2py is running on Google App Engine,
caching will be provided by the GAE memcache
(see gluon.contrib.gae_memcache)
"""
import time
import portalocker
import shelve
import thread
import os
import logging
import re
logger = logging.getLogger("web2py.cache")  # module-level cache logger

__all__ = ['Cache']  # public API of this module

# default cache lifetime, in seconds (compared against time.time())
DEFAULT_TIME_EXPIRE = 300
class CacheAbstract(object):
    """
    Abstract class for cache implementations.
    Main function is now to provide referenced api documentation.

    Use CacheInRam or CacheOnDisk instead which are derived from this class.
    """

    # key under which hit/miss statistics are stored inside the cache itself
    cache_stats_name = 'web2py_cache_statistics'

    def __init__(self, request=None):
        """
        Parameters
        ----------
        request:
            the global request object
        """
        raise NotImplementedError

    def __call__(self, key, f,
                time_expire = DEFAULT_TIME_EXPIRE):
        """
        Tries to retrieve the value corresponding to `key` from the cache if the
        object exists and if it did not expire, else it calls the function `f`
        and stores the output in the cache corresponding to `key`. In the end
        the output of the function is returned.

        :param key: the key of the object to be stored or retrieved
        :param f: the function, whose output is to be cached
        :param time_expire: expiration of the cache in seconds

        - `time_expire` is used to compare the current time with the time when
            the requested object was last saved in cache. It does not affect
            future requests.
        - Setting `time_expire` to 0 or negative value forces the cache to
            refresh.

        If the function `f` is `None` the cache is cleared.
        """
        raise NotImplementedError

    def clear(self, regex=None):
        """
        Clears the cache of all keys that match the provided regular expression.
        If no regular expression is provided, it clears all entries in cache.

        Parameters
        ----------
        regex:
            if provided, only keys matching the regex will be cleared.
            Otherwise all keys are cleared.
        """
        raise NotImplementedError

    def increment(self, key, value=1):
        """
        Increments the cached value for the given key by the amount in value

        Parameters
        ----------
        key:
            key for the cached object to be incremeneted
        value:
            amount of the increment (defaults to 1, can be negative)
        """
        raise NotImplementedError

    def _clear(self, storage, regex):
        """
        Auxiliary function called by `clear` to search and clear cache entries
        """
        r = re.compile(regex)
        # Python 2 items() returns a list, so deleting while looping is safe
        for (key, value) in storage.items():
            if r.match(str(key)):
                del storage[key]
class CacheInRam(CacheAbstract):
    """
    Ram based caching

    This is implemented as global (per process, shared by all threads)
    dictionary.
    A mutex-lock mechanism avoid conflicts.
    """

    locker = thread.allocate_lock()  # class-level lock shared by all instances
    meta_storage = {}                # per-application storage dictionaries

    def __init__(self, request=None):
        self.locker.acquire()
        self.request = request
        if request:
            app = request.application
        else:
            app = ''
        if not app in self.meta_storage:
            # first cache for this app: create storage with zeroed stats
            self.storage = self.meta_storage[app] = {CacheAbstract.cache_stats_name: {
                'hit_total': 0,
                'misses': 0,
            }}
        else:
            self.storage = self.meta_storage[app]
        self.locker.release()

    def clear(self, regex=None):
        self.locker.acquire()
        storage = self.storage
        if regex == None:
            storage.clear()
        else:
            self._clear(storage, regex)
        # clearing may have wiped the stats entry: recreate it
        if not CacheAbstract.cache_stats_name in storage.keys():
            storage[CacheAbstract.cache_stats_name] = {
                'hit_total': 0,
                'misses': 0,
            }
        self.locker.release()

    def __call__(self, key, f,
                time_expire = DEFAULT_TIME_EXPIRE):
        """
        Attention! cache.ram does not copy the cached object. It just stores a reference to it.
        Turns out the deepcopying the object has some problems:
        1) would break backward compatibility
        2) would be limiting because people may want to cache live objects
        3) would work unless we deepcopy no storage and retrival which would make things slow.
        Anyway. You can deepcopy explicitly in the function generating the value to be cached.
        """
        dt = time_expire
        self.locker.acquire()
        item = self.storage.get(key, None)
        if item and f == None:
            # f is None: explicit invalidation of this key
            del self.storage[key]
        # NOTE(review): 'hit_total' is incremented on every call, not only
        # on actual hits -- it is really a total-requests counter
        self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1
        self.locker.release()
        if f is None:
            return None
        if item and (dt == None or item[0] > time.time() - dt):
            return item[1]  # cached value still fresh
        value = f()
        self.locker.acquire()
        self.storage[key] = (time.time(), value)
        self.storage[CacheAbstract.cache_stats_name]['misses'] += 1
        self.locker.release()
        return value

    def increment(self, key, value=1):
        self.locker.acquire()
        try:
            if key in self.storage:
                value = self.storage[key][1] + value
            self.storage[key] = (time.time(), value)
        except BaseException, e:
            self.locker.release()
            raise e
        self.locker.release()
        return value
class CacheOnDisk(CacheAbstract):
    """
    Disk based cache

    This is implemented as a shelve object and it is shared by multiple web2py
    processes (and threads) as long as they share the same filesystem.
    The file is locked when accessed.

    Disk cache provides persistance when web2py is started/stopped but it is
    slower than `CacheInRam`

    Values stored in disk cache must be pickable.
    """

    # (folder, stats-key) pairs already verified, to skip the costly
    # shelve sanity check on subsequent instantiations
    speedup_checks = set()

    def __init__(self, request, folder=None):
        self.request = request

        # Lets test if the cache folder exists, if not
        # we are going to create it
        folder = folder or os.path.join(request.folder, 'cache')

        if not os.path.exists(folder):
            os.mkdir(folder)

        ### we need this because of a possible bug in shelve that may
        ### or may not lock
        self.locker_name = os.path.join(folder,'cache.lock')
        self.shelve_name = os.path.join(folder,'cache.shelve')

        locker, locker_locked = None, False
        speedup_key = (folder,CacheAbstract.cache_stats_name)
        if not speedup_key in self.speedup_checks or \
                not os.path.exists(self.shelve_name):
            try:
                locker = open(self.locker_name, 'a')
                portalocker.lock(locker, portalocker.LOCK_EX)
                locker_locked = True
                storage = shelve.open(self.shelve_name)
                try:
                    # initialize the statistics entry on first use
                    if not storage.has_key(CacheAbstract.cache_stats_name):
                        storage[CacheAbstract.cache_stats_name] = {
                            'hit_total': 0,
                            'misses': 0,
                        }
                    storage.sync()
                finally:
                    storage.close()
                self.speedup_checks.add(speedup_key)
            except ImportError:
                pass # no module _bsddb, ignoring exception now so it makes a ticket only if used
            except:
                logger.error('corrupted file %s, will try delete it!' \
                        % self.shelve_name)
                try:
                    os.unlink(self.shelve_name)
                except IOError:
                    logger.warn('unable to delete file %s' % self.shelve_name)
            if locker_locked:
                portalocker.unlock(locker)
            if locker:
                locker.close()

    def clear(self, regex=None):
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage = shelve.open(self.shelve_name)
        try:
            if regex == None:
                storage.clear()
            else:
                self._clear(storage, regex)
            if not CacheAbstract.cache_stats_name in storage.keys():
                storage[CacheAbstract.cache_stats_name] = {
                    'hit_total': 0,
                    'misses': 0,
                }
            storage.sync()
        finally:
            storage.close()
            portalocker.unlock(locker)
            locker.close()

    def __call__(self, key, f,
                time_expire = DEFAULT_TIME_EXPIRE):
        dt = time_expire
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage = shelve.open(self.shelve_name)
        item = storage.get(key, None)
        if item and f == None:
            del storage[key]
        # NOTE(review): as in CacheInRam, 'hit_total' counts every request,
        # not only cache hits
        storage[CacheAbstract.cache_stats_name] = {
            'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'] + 1,
            'misses': storage[CacheAbstract.cache_stats_name]['misses']
        }
        storage.sync()
        portalocker.unlock(locker)
        locker.close()
        if f is None:
            # NOTE(review): storage is not closed on this early return
            # (nor on the cache-hit return below) -- looks like a handle
            # leak; confirm before changing, shelve may tolerate it
            return None
        if item and (dt == None or item[0] > time.time() - dt):
            return item[1]
        value = f()
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage[key] = (time.time(), value)
        storage[CacheAbstract.cache_stats_name] = {
            'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'],
            'misses': storage[CacheAbstract.cache_stats_name]['misses'] + 1
        }
        storage.sync()
        storage.close()
        portalocker.unlock(locker)
        locker.close()
        return value

    def increment(self, key, value=1):
        locker = open(self.locker_name,'a')
        portalocker.lock(locker, portalocker.LOCK_EX)
        storage = shelve.open(self.shelve_name)
        try:
            if key in storage:
                value = storage[key][1] + value
            storage[key] = (time.time(), value)
            storage.sync()
        finally:
            storage.close()
            portalocker.unlock(locker)
            locker.close()
        return value
class Cache(object):
    """
    Sets up generic caching, creating an instance of both CacheInRam and
    CacheOnDisk.
    In case of GAE will make use of gluon.contrib.gae_memcache.

    - self.ram is an instance of CacheInRam
    - self.disk is an instance of CacheOnDisk
    """

    def __init__(self, request):
        """
        Parameters
        ----------
        request:
            the global request object
        """
        # GAE will have a special caching
        import settings
        if settings.global_settings.web2py_runtime_gae:
            from contrib.gae_memcache import MemcacheClient
            self.ram=self.disk=MemcacheClient(request)
        else:
            # Otherwise use ram (and try also disk)
            self.ram = CacheInRam(request)
            try:
                self.disk = CacheOnDisk(request)
            except IOError:
                logger.warning('no cache.disk (IOError)')
            except AttributeError:
                # normally not expected anymore, as GAE has already
                # been accounted for
                logger.warning('no cache.disk (AttributeError)')

    def __call__(self,
                 key = None,
                 time_expire = DEFAULT_TIME_EXPIRE,
                 cache_model = None):
        """
        Decorator function that can be used to cache any function/method.

        Example::

            @cache('key', 5000, cache.ram)
            def f():
                return time.ctime()

        When the function f is called, web2py tries to retrieve
        the value corresponding to `key` from the cache if the
        object exists and if it did not expire, else it calls the function `f`
        and stores the output in the cache corresponding to `key`. In the end
        the output of the function is returned.

        :param key: the key of the object to be stored or retrieved
        :param time_expire: expiration of the cache in seconds
        :param cache_model: `cache.ram`, `cache.disk`, or other
            (like `cache.memcache` if defined). It defaults to `cache.ram`.

        Notes
        -----
        `time_expire` is used to compare the current time with the time when
        the requested object was last saved in cache. It does not affect
        future requests.
        Setting `time_expire` to 0 or negative value forces the cache to
        refresh.

        If the function `f` is an action, we suggest using
        `request.env.path_info` as key.
        """
        if not cache_model:
            cache_model = self.ram

        def tmp(func):
            def action():
                return cache_model(key, func, time_expire)
            # bug fix: was 'action.__name___' (trailing triple underscore),
            # which set a junk attribute and left the wrapper named 'action'
            action.__name__ = func.__name__
            action.__doc__ = func.__doc__
            return action

        return tmp
| Python |
#!/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Contains:
- wsgibase: the gluon wsgi application
"""
import gc
import cgi
import cStringIO
import Cookie
import os
import re
import copy
import sys
import time
import thread
import datetime
import signal
import socket
import tempfile
import random
import string
import platform
from fileutils import abspath, write_file
from settings import global_settings
from admin import add_path_first, create_missing_folders, create_missing_app_folders
from globals import current
from custom_import import custom_import_install
from contrib.simplejson import dumps
# Remarks:
# calling script has inserted path to script directory into sys.path
# applications_parent (path to applications/, site-packages/ etc)
# defaults to that directory set sys.path to
# ("", gluon_parent/site-packages, gluon_parent, ...)
#
# this is wrong:
# web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# because we do not want the path to this file which may be Library.zip
# gluon_parent is the directory containing gluon, web2py.py, logging.conf
# and the handlers.
# applications_parent (web2py_path) is the directory containing applications/
# and routes.py
# The two are identical unless web2py_path is changed via the web2py.py -f folder option
# main.web2py_path is the same as applications_parent (for backward compatibility)
# no os.mkdir -- presumably a restricted runtime (e.g. GAE); force
# database-backed sessions since the filesystem is not writable
if not hasattr(os, 'mkdir'):
    global_settings.db_sessions = True
if global_settings.db_sessions is not True:
    global_settings.db_sessions = set()
global_settings.gluon_parent = os.environ.get('web2py_path', os.getcwd())
global_settings.applications_parent = global_settings.gluon_parent
web2py_path = global_settings.applications_parent # backward compatibility
global_settings.app_folders = set()
global_settings.debugging = False

# install the web2py import hook and make sure expected folders exist
custom_import_install(web2py_path)
create_missing_folders()

# set up logging for subsequent imports
import logging
import logging.config
logpath = abspath("logging.conf")
if os.path.exists(logpath):
    logging.config.fileConfig(abspath("logging.conf"))
else:
    logging.basicConfig()
logger = logging.getLogger("web2py")
from restricted import RestrictedError
from http import HTTP, redirect
from globals import Request, Response, Session
from compileapp import build_environment, run_models_in, \
run_controller_in, run_view_in
from fileutils import copystream
from contenttype import contenttype
from dal import BaseAdapter
from settings import global_settings
from validators import CRYPT
from cache import Cache
from html import URL as Url
import newcron
import rewrite
__all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer']
requests = 0 # gc timer
# Security Checks: validate URL and session_id here,
# accept_language is validated in languages
# pattern used to validate client address
regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6
version_info = open(abspath('VERSION', gluon=True), 'r')
web2py_version = version_info.read()
version_info.close()
try:
import rocket
except:
if not global_settings.web2py_runtime_gae:
logger.warn('unable to import Rocket')
rewrite.load()
def get_client(env):
    """
    Guess the client address from the environment variables.

    Tries 'http_x_forwarded_for' first, then 'remote_addr'; if neither
    yields a match, assume '127.0.0.1' (running locally).
    """
    for source in ('http_x_forwarded_for', 'remote_addr'):
        found = regex_client.search(env.get(source, ''))
        if found:
            return found.group()
    return '127.0.0.1'
def copystream_progress(request, chunk_size= 10**5):
    """
    copies request.env.wsgi_input into request.body
    and stores progress upload status in cache.ram
    X-Progress-ID:length and X-Progress-ID:uploaded

    Returns a file-like object positioned at the start of the body.
    """
    # no declared content-length: nothing to copy, return an empty buffer
    if not request.env.content_length:
        return cStringIO.StringIO()
    source = request.env.wsgi_input
    size = int(request.env.content_length)
    dest = tempfile.TemporaryFile()
    # without an X-Progress-ID there is no progress to publish: plain copy
    if not 'X-Progress-ID' in request.vars:
        copystream(source, dest, size, chunk_size)
        return dest
    cache_key = 'X-Progress-ID:'+request.vars['X-Progress-ID']
    cache = Cache(request)
    # publish total length and a zeroed uploaded counter
    cache.ram(cache_key+':length', lambda: size, 0)
    cache.ram(cache_key+':uploaded', lambda: 0, 0)
    while size > 0:
        if size < chunk_size:
            data = source.read(size)
            cache.ram.increment(cache_key+':uploaded', size)
        else:
            data = source.read(chunk_size)
            cache.ram.increment(cache_key+':uploaded', chunk_size)
        length = len(data)
        # defensive: never account for more bytes than are still expected
        if length > size:
            (data, length) = (data[:size], size)
        size -= length
        # a zero-length read means the stream ended early
        if length == 0:
            break
        dest.write(data)
        # a short read also signals end of stream
        if length < chunk_size:
            break
    dest.seek(0)
    # NOTE(review): cache.ram(key, None) presumably clears the progress
    # entries once the upload completes - confirm against cache.Cache
    cache.ram(cache_key+':length', None)
    cache.ram(cache_key+':uploaded', None)
    return dest
def serve_controller(request, response, session):
    """
    this function is used to generate a dynamic page.
    It first runs all models, then runs the function in the controller,
    and then tries to render the output using a view/template.
    this function must run from the [application] folder.
    A typical example would be the call to the url
    /[application]/[controller]/[function] that would result in a call
    to [function]() in applications/[application]/[controller].py
    rendered by applications/[application]/views/[controller]/[function].html

    This function does not return: the rendered page is delivered by
    raising HTTP(response.status, page, **response.headers).
    """

    # ##################################################
    # build environment for controller and view
    # ##################################################

    environment = build_environment(request, response, session)

    # set default view, controller can override it

    response.view = '%s/%s.%s' % (request.controller,
                                  request.function,
                                  request.extension)

    # also, make sure the flash is passed through
    # ##################################################
    # process models, controller and view (if required)
    # ##################################################

    run_models_in(environment)
    response._view_environment = copy.copy(environment)
    page = run_controller_in(request.controller, request.function, environment)
    if isinstance(page, dict):
        # a dict returned by the action is exposed to the view environment
        # and rendered; the rendered output becomes the page body
        response._vars = page
        for key in page:
            response._view_environment[key] = page[key]
        run_view_in(response._view_environment)
        page = response.body.getvalue()
    # logic to garbage collect after exec, not always, once every 100 requests
    global requests
    requests = ('requests' in globals()) and (requests+1) % 100 or 0
    if not requests: gc.collect()
    # end garbage collection logic
    raise HTTP(response.status, page, **response.headers)
def start_response_aux(status, headers, exc_info, response=None):
    """
    Copy a WSGI status line and header list onto the web2py response.

    in controller you can use::

        - request.wsgi.environ
        - request.wsgi.start_response

    to call third party WSGI applications.

    Returns a writer callable that appends raw (unescaped) data to the
    response body, playing the role of the WSGI ``write`` callable.
    """
    # keep only the numeric status code from e.g. '200 OK'
    response.status = str(status).split(' ', 1)[0]
    response.headers = dict(headers)

    def writer(*args, **kargs):
        return response.write(escape=False, *args, **kargs)
    return writer
def middleware_aux(request, response, *middleware_apps):
    """
    In your controller use::

        @request.wsgi.middleware(middleware1, middleware2, ...)

    to decorate actions with WSGI middleware. actions must return strings.
    uses a simulated environment so it may have weird behavior in some cases
    """

    def middleware(f):
        # wrap the action f as a minimal WSGI application
        def app(environ, start_response):
            data = f()
            start_response(response.status,response.headers.items())
            if isinstance(data,list):
                return data
            return [data]
        # chain the supplied middleware factories around the action app
        for item in middleware_apps:
            app=item(app)

        def caller(app):
            # run the (wrapped) app against the simulated request environ
            return app(request.wsgi.environ,request.wsgi.start_response)
        # default arguments deliberately freeze the current caller/app
        # bindings at decoration time (avoids late-binding surprises)
        return lambda caller=caller, app=app: caller(app)
    return middleware
def environ_aux(environ, request):
    """
    Build a shallow copy of the WSGI environ suitable for a nested
    third-party WSGI application: the input stream is replaced with the
    already-buffered request body and wsgi.version is reset to 1.
    """
    wrapped = copy.copy(environ)
    wrapped.update({'wsgi.input': request.body, 'wsgi.version': 1})
    return wrapped
def parse_get_post_vars(request, environ):
    """
    Populate request.get_vars, request.post_vars and request.vars from
    the query string and the (possibly multipart) request body.
    """

    # always parse variables in URL for GET, POST, PUT, DELETE, etc. in get_vars
    dget = cgi.parse_qsl(request.env.query_string or '', keep_blank_values=1)
    for (key, value) in dget:
        if key in request.get_vars:
            # repeated keys accumulate into a list
            if isinstance(request.get_vars[key], list):
                request.get_vars[key] += [value]
            else:
                request.get_vars[key] = [request.get_vars[key]] + [value]
        else:
            request.get_vars[key] = value
        request.vars[key] = request.get_vars[key]

    # parse POST variables on POST, PUT, BOTH only in post_vars
    request.body = copystream_progress(request) ### stores request body
    if (request.body and request.env.request_method in ('POST', 'PUT', 'BOTH')):
        dpost = cgi.FieldStorage(fp=request.body,environ=environ,keep_blank_values=1)
        # The same detection used by FieldStorage to detect multipart POSTs
        is_multipart = dpost.type[:10] == 'multipart/'
        request.body.seek(0)
        isle25 = sys.version_info[1] <= 5

        def listify(a):
            # wrap scalars in a list; leave lists alone
            return (not isinstance(a,list) and [a]) or a
        try:
            keys = sorted(dpost)
        except TypeError:
            # FieldStorage may not be iterable for some bodies
            keys = []
        for key in keys:
            dpk = dpost[key]
            # if an element is not a file replace it with its value else leave it alone
            if isinstance(dpk, list):
                if not dpk[0].filename:
                    value = [x.value for x in dpk]
                else:
                    value = [x for x in dpk]
            elif not dpk.filename:
                value = dpk.value
            else:
                value = dpk
            pvalue = listify(value)
            if key in request.vars:
                # key present in both the URL and the body: FieldStorage
                # merged them, so separate the POST part back out
                gvalue = listify(request.vars[key])
                if isle25:
                    # Python <= 2.5: FieldStorage did not merge GET vars
                    value = pvalue + gvalue
                elif is_multipart:
                    # NOTE(review): in multipart bodies the GET values
                    # presumably come first in the merged list - confirm
                    pvalue = pvalue[len(gvalue):]
                else:
                    # urlencoded bodies: GET values presumably come last
                    pvalue = pvalue[:-len(gvalue)]
            request.vars[key] = value
            if len(pvalue):
                # scalars stay scalars; multiple values stay lists
                request.post_vars[key] = (len(pvalue)>1 and pvalue) or pvalue[0]
def wsgibase(environ, responder):
    """
    this is the gluon wsgi application. the first function called when a page
    is requested (static or dynamic). it can be called by paste.httpserver
    or by apache mod_wsgi.

      - fills request with info
      - the environment variables, replacing '.' with '_'
      - adds web2py path and version info
      - compensates for fcgi missing path_info and query_string
      - validates the path in url

    The url path must be either:

    1. for static pages:

      - /<application>/static/<file>

    2. for dynamic pages:

      - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>]
      - (sub may go several levels deep, currently 3 levels are supported:
        sub1/sub2/sub3)

    The naming conventions are:

      - application, controller, function and extension may only contain
        [a-zA-Z0-9_]
      - file and sub may also contain '-', '=', '.' and '/'
    """
    # reset the thread-local 'current' object for this request
    current.__dict__.clear()
    request = Request()
    response = Response()
    session = Session()
    request.env.web2py_path = global_settings.applications_parent
    request.env.web2py_version = web2py_version
    request.env.update(global_settings)
    static_file = False
    try:
        try:
            try:

                # ##################################################
                # handle fcgi missing path_info and query_string
                # select rewrite parameters
                # rewrite incoming URL
                # parse rewritten header variables
                # parse rewritten URL
                # serve file if static
                # ##################################################

                if not environ.get('PATH_INFO',None) and \
                        environ.get('REQUEST_URI',None):
                    # for fcgi, get path_info and query_string from request_uri
                    items = environ['REQUEST_URI'].split('?')
                    environ['PATH_INFO'] = items[0]
                    if len(items) > 1:
                        environ['QUERY_STRING'] = items[1]
                    else:
                        environ['QUERY_STRING'] = ''
                if not environ.get('HTTP_HOST',None):
                    environ['HTTP_HOST'] = '%s:%s' % (environ.get('SERVER_NAME'),
                                                      environ.get('SERVER_PORT'))

                (static_file, environ) = rewrite.url_in(request, environ)
                if static_file:
                    if request.env.get('query_string', '')[:10] == 'attachment':
                        response.headers['Content-Disposition'] = 'attachment'
                    # response.stream delivers the file by raising HTTP
                    response.stream(static_file, request=request)

                # ##################################################
                # fill in request items
                # ##################################################

                http_host = request.env.http_host.split(':',1)[0]

                local_hosts = [http_host,'::1','127.0.0.1','::ffff:127.0.0.1']
                if not global_settings.web2py_runtime_gae:
                    local_hosts += [socket.gethostname(),
                                    socket.gethostbyname(http_host)]
                request.client = get_client(request.env)
                request.folder = abspath('applications',
                                         request.application) + os.sep
                x_req_with = str(request.env.http_x_requested_with).lower()
                request.ajax = x_req_with == 'xmlhttprequest'
                request.cid = request.env.http_web2py_component_element
                request.is_local = request.env.remote_addr in local_hosts
                request.is_https = request.env.wsgi_url_scheme \
                    in ['https', 'HTTPS'] or request.env.https == 'on'

                # ##################################################
                # compute a request.uuid to be used for tickets and toolbar
                # ##################################################

                response.uuid = request.compute_uuid()

                # ##################################################
                # access the requested application
                # ##################################################

                if not os.path.exists(request.folder):
                    # unknown application: fall back to 'welcome', to the
                    # configured error handler, or to a 404
                    if request.application == rewrite.thread.routes.default_application and request.application != 'welcome':
                        request.application = 'welcome'
                        redirect(Url(r=request))
                    elif rewrite.thread.routes.error_handler:
                        redirect(Url(rewrite.thread.routes.error_handler['application'],
                                     rewrite.thread.routes.error_handler['controller'],
                                     rewrite.thread.routes.error_handler['function'],
                                     args=request.application))
                    else:
                        raise HTTP(404,
                                   rewrite.thread.routes.error_message % 'invalid request',
                                   web2py_error='invalid application')
                request.url = Url(r=request, args=request.args,
                                  extension=request.raw_extension)

                # ##################################################
                # build missing folders
                # ##################################################

                create_missing_app_folders(request)

                # ##################################################
                # get the GET and POST data
                # ##################################################

                parse_get_post_vars(request, environ)

                # ##################################################
                # expose wsgi hooks for convenience
                # ##################################################

                request.wsgi.environ = environ_aux(environ,request)
                request.wsgi.start_response = lambda status='200', headers=[], \
                    exec_info=None, response=response: \
                    start_response_aux(status, headers, exec_info, response)
                request.wsgi.middleware = lambda *a: middleware_aux(request,response,*a)

                # ##################################################
                # load cookies
                # ##################################################

                if request.env.http_cookie:
                    try:
                        request.cookies.load(request.env.http_cookie)
                    except Cookie.CookieError, e:
                        pass  # invalid cookies

                # ##################################################
                # try load session or create new session file
                # ##################################################

                session.connect(request, response)

                # ##################################################
                # set no-cache headers
                # ##################################################

                response.headers['Content-Type'] = contenttype('.'+request.extension)
                response.headers['Cache-Control'] = \
                    'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'
                response.headers['Expires'] = \
                    time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime())
                response.headers['Pragma'] = 'no-cache'

                # ##################################################
                # run controller
                # ##################################################

                serve_controller(request, response, session)

            except HTTP, http_response:
                # serve_controller (and response.stream) deliver the page
                # by raising HTTP, so this is the normal success path
                if static_file:
                    return http_response.to(responder)

                if request.body:
                    request.body.close()

                # ##################################################
                # on success, try store session in database
                # ##################################################

                session._try_store_in_db(request, response)

                # ##################################################
                # on success, commit database
                # ##################################################

                if response._custom_commit:
                    response._custom_commit()
                else:
                    BaseAdapter.close_all_instances('commit')

                # ##################################################
                # if session not in db try store session on filesystem
                # this must be done after trying to commit database!
                # ##################################################

                session._try_store_on_disk(request, response)

                # ##################################################
                # store cookies in headers
                # ##################################################

                if request.cid:
                    # propagate flash/js to the embedding component page
                    if response.flash and not 'web2py-component-flash' in http_response.headers:
                        http_response.headers['web2py-component-flash'] = \
                            str(response.flash).replace('\n','')
                    if response.js and not 'web2py-component-command' in http_response.headers:
                        http_response.headers['web2py-component-command'] = \
                            response.js.replace('\n','')
                if session._forget and \
                        response.session_id_name in response.cookies:
                    del response.cookies[response.session_id_name]
                elif session._secure:
                    response.cookies[response.session_id_name]['secure'] = True
                if len(response.cookies)>0:
                    # str(Morsel) starts with 'Set-Cookie: '; [11:] drops it
                    http_response.headers['Set-Cookie'] = \
                        [str(cookie)[11:] for cookie in response.cookies.values()]
                ticket=None

        except RestrictedError, e:
            # application code failed: log a ticket, roll back the db
            if request.body:
                request.body.close()

            # ##################################################
            # on application error, rollback database
            # ##################################################

            ticket = e.log(request) or 'unknown'
            if response._custom_rollback:
                response._custom_rollback()
            else:
                BaseAdapter.close_all_instances('rollback')

            http_response = \
                HTTP(500,
                     rewrite.thread.routes.error_message_ticket % dict(ticket=ticket),
                     web2py_error='ticket %s' % ticket)

        except:
            # unexpected framework-level failure: best-effort rollback,
            # then log a ticket for the framework error itself
            if request.body:
                request.body.close()

            # ##################################################
            # on application error, rollback database
            # ##################################################

            try:
                if response._custom_rollback:
                    response._custom_rollback()
                else:
                    BaseAdapter.close_all_instances('rollback')
            except:
                pass

            e = RestrictedError('Framework', '', '', locals())
            ticket = e.log(request) or 'unrecoverable'
            http_response = \
                HTTP(500,
                     rewrite.thread.routes.error_message_ticket % dict(ticket=ticket),
                     web2py_error='ticket %s' % ticket)

    finally:
        # always release the session file, even after errors
        if response and hasattr(response, 'session_file') and response.session_file:
            response.session_file.close()
#        if global_settings.debugging:
#            import gluon.debug
#            gluon.debug.stop_trace()

    session._unlock(response)

    # give the router a chance to rewrite the error response; if it
    # returns a new environ instead, re-dispatch the request
    http_response, new_environ = rewrite.try_rewrite_on_error(
        http_response, request, environ, ticket)
    if not http_response:
        return wsgibase(new_environ,responder)

    # soft cron piggy-backs on ordinary requests when enabled
    if global_settings.web2py_crontype == 'soft':
        newcron.softcron(global_settings.applications_parent).start()

    return http_response.to(responder)
def save_password(password, port):
    """
    used by main() to save the password in the parameters_port.py file.

    Special values of `password`:
      - '<random>': generate, print and store a new 8-char password
      - '<recycle>': keep the existing password file if one exists
      - '<pam_user:...>': store the bracket-stripped string as-is
        (presumably delegating authentication to PAM - confirm)
      - '': stores password=None (no hashed password saved)
    Any other value is hashed with CRYPT before being stored.
    """
    password_file = abspath('parameters_%i.py' % port)
    if password == '<random>':
        # make up a new password
        chars = string.letters + string.digits
        password = ''.join([random.choice(chars) for i in range(8)])
        cpassword = CRYPT()(password)[0]
        print '******************* IMPORTANT!!! ************************'
        print 'your admin password is "%s"' % password
        print '*********************************************************'
    elif password == '<recycle>':
        # reuse the current password if any
        if os.path.exists(password_file):
            return
        else:
            password = ''
    elif password.startswith('<pam_user:'):
        # use the pam password for specified user
        cpassword = password[1:-1]
    else:
        # use provided password
        cpassword = CRYPT()(password)[0]
    fp = open(password_file, 'w')
    if password:
        fp.write('password="%s"\n' % cpassword)
    else:
        fp.write('password=None\n')
    fp.close()
def appfactory(wsgiapp=wsgibase,
               logfilename='httpserver.log',
               profilerfilename='profiler.log'):
    """
    generates a wsgi application that does logging and profiling and calls
    wsgibase

    .. function:: gluon.main.appfactory(
            [wsgiapp=wsgibase
            [, logfilename='httpserver.log'
            [, profilerfilename='profiler.log']]])
    """
    # start from a fresh profiler output file
    if profilerfilename and os.path.exists(profilerfilename):
        os.unlink(profilerfilename)
    locker = thread.allocate_lock()

    def app_with_logging(environ, responder):
        """
        a wsgi app that does logging and profiling and calls wsgibase
        """
        status_headers = []

        def responder2(s, h):
            """
            wsgi responder app
            """
            # capture status and headers so they can be logged below
            status_headers.append(s)
            status_headers.append(h)
            return responder(s, h)

        time_in = time.time()
        ret = [0]
        if not profilerfilename:
            ret[0] = wsgiapp(environ, responder2)
        else:
            import cProfile
            import pstats
            logger.warn('profiler is on. this makes web2py slower and serial')
            # serialize requests while profiling
            locker.acquire()
            cProfile.runctx('ret[0] = wsgiapp(environ, responder2)',
                            globals(), locals(), profilerfilename+'.tmp')
            stat = pstats.Stats(profilerfilename+'.tmp')
            stat.stream = cStringIO.StringIO()
            stat.strip_dirs().sort_stats("time").print_stats(80)
            profile_out = stat.stream.getvalue()
            profile_file = open(profilerfilename, 'a')
            profile_file.write('%s\n%s\n%s\n%s\n\n' % \
                ('='*60, environ['PATH_INFO'], '='*60, profile_out))
            profile_file.close()
            locker.release()
        try:
            # one log line per request; logging failures are ignored
            line = '%s, %s, %s, %s, %s, %s, %f\n' % (
                environ['REMOTE_ADDR'],
                datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
                environ['REQUEST_METHOD'],
                environ['PATH_INFO'].replace(',', '%2C'),
                environ['SERVER_PROTOCOL'],
                (status_headers[0])[:3],
                time.time() - time_in,
                )
            if not logfilename:
                sys.stdout.write(line)
            elif isinstance(logfilename, str):
                write_file(logfilename, line, 'a')
            else:
                # presumably an already-open file-like object - confirm
                logfilename.write(line)
        except:
            pass
        return ret[0]
    return app_with_logging
class HttpServer(object):
    """
    the web2py web server (Rocket)

    Wraps a rocket.Rocket instance: validates the optional `interfaces`
    parameter, optionally relocates web2py to `path`, saves the admin
    password, and configures SSL when a certificate/key pair is given.
    """

    def __init__(
        self,
        ip='127.0.0.1',
        port=8000,
        password='',
        pid_filename='httpserver.pid',
        log_filename='httpserver.log',
        profiler_filename=None,
        ssl_certificate=None,
        ssl_private_key=None,
        min_threads=None,
        max_threads=None,
        server_name=None,
        request_queue_size=5,
        timeout=10,
        shutdown_timeout=None, # Rocket does not use a shutdown timeout
        path=None,
        interfaces=None # Rocket is able to use several interfaces - must be list of socket-tuples as string
        ):
        """
        starts the web server.
        """
        if interfaces:
            # if interfaces is specified, it must be tested for rocket parameter correctness
            # not necessarily completely tested (e.g. content of tuples or ip-format)
            # BUGFIX: the original code did `raise "..."` with a bare string;
            # string exceptions are rejected with a TypeError on Python >= 2.6
            # (PEP 352), so raise a real exception with the same message.
            if isinstance(interfaces, list):
                for i in interfaces:
                    if not isinstance(i, tuple):
                        raise RuntimeError("Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/")
            else:
                raise RuntimeError("Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/")

        if path:
            # if a path is specified change the global variables so that web2py
            # runs from there instead of cwd or os.environ['web2py_path']
            global web2py_path
            path = os.path.normpath(path)
            web2py_path = path
            global_settings.applications_parent = path
            os.chdir(path)
            [add_path_first(p) for p in (path, abspath('site-packages'), "")]

        save_password(password, port)
        self.pid_filename = pid_filename
        if not server_name:
            server_name = socket.gethostname()
        logger.info('starting web server...')
        rocket.SERVER_NAME = server_name
        sock_list = [ip, port]
        # enable SSL only when both certificate and key exist and the
        # Python ssl module is available; otherwise fall back to plain HTTP
        if not ssl_certificate or not ssl_private_key:
            logger.info('SSL is off')
        elif not rocket.ssl:
            logger.warning('Python "ssl" module unavailable. SSL is OFF')
        elif not os.path.exists(ssl_certificate):
            logger.warning('unable to open SSL certificate. SSL is OFF')
        elif not os.path.exists(ssl_private_key):
            logger.warning('unable to open SSL private key. SSL is OFF')
        else:
            sock_list.extend([ssl_private_key, ssl_certificate])
            logger.info('SSL is ON')
        app_info = {'wsgi_app': appfactory(wsgibase,
                                           log_filename,
                                           profiler_filename) }

        self.server = rocket.Rocket(interfaces or tuple(sock_list),
                                    method='wsgi',
                                    app_info=app_info,
                                    min_threads=min_threads,
                                    max_threads=max_threads,
                                    queue_size=int(request_queue_size),
                                    timeout=int(timeout),
                                    handle_signals=False,
                                    )

    def start(self):
        """
        start the web server
        """
        try:
            # stop cleanly on SIGTERM/SIGINT; signal.signal can fail when
            # not running in the main thread, hence the broad guard
            signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())
            signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())
        except:
            pass
        write_file(self.pid_filename, str(os.getpid()))
        self.server.start()

    def stop(self, stoplogging=False):
        """
        stop cron and the web server
        """
        newcron.stopcron()
        self.server.stop(stoplogging)
        try:
            os.unlink(self.pid_filename)
        except:
            pass
| Python |
# this file exists for backward compatibility
__all__ = ['DAL','Field','drivers']
from dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, drivers, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
::
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942
# Title: Cross-site scripting (XSS) defense
# Submitter: Josh Goldfoot (other recipes)
# Last Updated: 2006/08/05
# Version no: 1.0
"""
from htmllib import HTMLParser
from cgi import escape
from urlparse import urlparse
from formatter import AbstractFormatter
from htmlentitydefs import entitydefs
from xml.sax.saxutils import quoteattr
__all__ = ['sanitize']
def xssescape(text):
    """
    Gets rid of < and > and & and, for good measure, :

    HTML-escapes *text* (including quotes) and additionally replaces the
    colon with its numeric entity ``&#58;`` so scheme separators such as
    ``javascript:`` cannot survive escaping.  BUGFIX: the previous code
    replaced ':' with ':' itself, a no-op that contradicted the docstring.
    """
    return escape(text, quote=True).replace(':', '&#58;')
class XssCleaner(HTMLParser):
    """
    htmllib.HTMLParser subclass that sanitizes HTML: tags and attributes
    outside the supplied whitelists are either escaped into visible text
    or, when strip_disallowed is True, removed entirely.  The cleaned
    markup accumulates in self.result.
    """

    def __init__(
        self,
        permitted_tags=[
            'a',
            'b',
            'blockquote',
            'br/',
            'i',
            'li',
            'ol',
            'ul',
            'p',
            'cite',
            'code',
            'pre',
            'img/',
            ],
        allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt'
                            ], 'blockquote': ['type']},
        fmt=AbstractFormatter,
        strip_disallowed = False
        ):
        HTMLParser.__init__(self, fmt)
        # sanitized output accumulated while feeding
        self.result = ''
        # stack (front-inserted) of tags awaiting a closing tag
        self.open_tags = []
        # a trailing '/' in the whitelist marks a self-closing tag;
        # split the list accordingly and allow both groups
        self.permitted_tags = [i for i in permitted_tags if i[-1] != '/']
        self.requires_no_close = [i[:-1] for i in permitted_tags
                                  if i[-1] == '/']
        self.permitted_tags += self.requires_no_close
        self.allowed_attributes = allowed_attributes

        # The only schemes allowed in URLs (for href and src attributes).
        # Adding "javascript" or "vbscript" to this list would not be smart.
        self.allowed_schemes = ['http', 'https', 'ftp']

        #to strip or escape disallowed tags?
        self.strip_disallowed = strip_disallowed
        # True while inside a disallowed tag (only meaningful when stripping)
        self.in_disallowed = False

    def handle_data(self, data):
        # text content is escaped unless it sits inside a stripped tag
        if data and not self.in_disallowed:
            self.result += xssescape(data)

    def handle_charref(self, ref):
        # numeric character references: short decimal ones pass through
        if self.in_disallowed:
            return
        elif len(ref) < 7 and ref.isdigit():
            self.result += '&#%s;' % ref
        else:
            self.result += xssescape('&#%s' % ref)

    def handle_entityref(self, ref):
        # named entities: only the standard HTML entities pass through
        if self.in_disallowed:
            return
        elif ref in entitydefs:
            self.result += '&%s;' % ref
        else:
            self.result += xssescape('&%s' % ref)

    def handle_comment(self, comment):
        # comments are kept but rendered escaped (as visible text)
        if self.in_disallowed:
            return
        elif comment:
            self.result += xssescape('<!--%s-->' % comment)

    def handle_starttag(
        self,
        tag,
        method,
        attrs,
        ):
        if tag not in self.permitted_tags:
            # disallowed tag: either start stripping its content or
            # render the tag itself as escaped text
            if self.strip_disallowed:
                self.in_disallowed = True
            else:
                self.result += xssescape('<%s>' % tag)
        else:
            bt = '<' + tag
            if tag in self.allowed_attributes:
                attrs = dict(attrs)
                # keep only whitelisted, non-empty attributes
                self.allowed_attributes_here = [x for x in
                        self.allowed_attributes[tag] if x in attrs
                        and len(attrs[x]) > 0]
                for attribute in self.allowed_attributes_here:
                    if attribute in ['href', 'src', 'background']:
                        # URL-bearing attributes must pass the scheme check
                        if self.url_is_acceptable(attrs[attribute]):
                            bt += ' %s="%s"' % (attribute,
                                    attrs[attribute])
                    else:
                        bt += ' %s=%s' % (xssescape(attribute),
                                quoteattr(attrs[attribute]))
                # NOTE(review): an <a> or <img> left with no surviving
                # attributes is dropped entirely here
                if bt == '<a' or bt == '<img':
                    return
            if tag in self.requires_no_close:
                bt += ' /'
            bt += '>'
            self.result += bt
            self.open_tags.insert(0, tag)

    def handle_endtag(self, tag, attrs):
        bracketed = '</%s>' % tag
        if tag not in self.permitted_tags:
            # closing a disallowed tag ends strip mode (when stripping)
            if self.strip_disallowed:
                self.in_disallowed = False
            else:
                self.result += xssescape(bracketed)
        elif tag in self.open_tags:
            self.result += bracketed
            self.open_tags.remove(tag)

    def unknown_starttag(self, tag, attributes):
        self.handle_starttag(tag, None, attributes)

    def unknown_endtag(self, tag):
        self.handle_endtag(tag, None)

    def url_is_acceptable(self, url):
        """
        Accepts relative and absolute urls: absolute ones must use an
        allowed scheme and contain a dotted host; relative ones must
        start with '/'.
        """
        parsed = urlparse(url)
        return (parsed[0] in self.allowed_schemes and '.' in parsed[1]) \
            or (parsed[0] == '' and parsed[2].startswith('/'))

    def strip(self, rawstring, escape=True):
        """
        Returns the argument stripped of potentially harmful
        HTML or Javascript code

        @type escape: boolean
        @param escape: If True (default) it escapes the potentially harmful
          content, otherwise remove it
        """
        if not isinstance(rawstring, str):
            return str(rawstring)
        # normalize '<br/>' to '<br />' so the parser recognizes them
        for tag in self.requires_no_close:
            rawstring = rawstring.replace("<%s/>" % tag, "<%s />" % tag)
        if not escape:
            self.strip_disallowed = True
        self.result = ''
        self.feed(rawstring)
        # close any tags left open so the output stays balanced
        for endtag in self.open_tags:
            if endtag not in self.requires_no_close:
                self.result += '</%s>' % endtag
        return self.result

    def xtags(self):
        """
        Returns a printable string informing the user which tags are allowed
        """
        tg = ''
        for x in sorted(self.permitted_tags):
            tg += '<' + x
            if x in self.allowed_attributes:
                for y in self.allowed_attributes[x]:
                    tg += ' %s=""' % y
            tg += '> '
        return xssescape(tg.strip())
def sanitize(text, permitted_tags=[
    'a',
    'b',
    'blockquote',
    'br/',
    'i',
    'li',
    'ol',
    'ul',
    'p',
    'cite',
    'code',
    'pre',
    'img/',
    'h1','h2','h3','h4','h5','h6',
    'table','tr','td','div',
    ],
    allowed_attributes = {
    'a': ['href', 'title'],
    'img': ['src', 'alt'],
    'blockquote': ['type'],
    'td': ['colspan'],
    },
    escape=True):
    """
    Return *text* cleaned of markup outside the permitted_tags /
    allowed_attributes whitelists.  Disallowed markup is escaped into
    visible text when escape is True, or removed when escape is False.
    Non-string input is converted with str() and returned as-is.
    """
    if not isinstance(text, str):
        return str(text)
    cleaner = XssCleaner(permitted_tags=permitted_tags,
                         allowed_attributes=allowed_attributes)
    return cleaner.strip(text, escape)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created by Attila Csipa <web2py@csipa.in.rs>
Modified by Massimo Di Pierro <mdipierro@cs.depaul.edu>
"""
import sys
import os
import threading
import logging
import time
import sched
import re
import datetime
import platform
import portalocker
import fileutils
import cPickle
from settings import global_settings
logger = logging.getLogger("web2py.cron")
_cron_stopping = False
def stopcron():
    """Signal all cron threads to shut down gracefully."""
    global _cron_stopping
    _cron_stopping = True
class extcron(threading.Thread):
    """
    Cron thread for 'external' mode (presumably triggered by an
    OS-level scheduler invoking web2py - confirm against caller).
    Note: the constructor itself already runs a startup crondance.
    """

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        # non-daemon: the process waits for this thread to finish
        self.setDaemon(False)
        self.path = applications_parent
        # startup pass runs immediately, in the constructing thread
        crondance(self.path, 'external', startup=True)

    def run(self):
        if not _cron_stopping:
            logger.debug('external cron invocation')
            crondance(self.path, 'external', startup=False)
class hardcron(threading.Thread):
    """
    Daemon thread that fires crondance in 'hard' mode at every minute
    boundary until stopcron() is called.
    Note: the constructor itself already runs a startup crondance.
    """

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        # daemon: does not prevent interpreter shutdown
        self.setDaemon(True)
        self.path = applications_parent
        # startup pass runs immediately, in the constructing thread
        crondance(self.path, 'hard', startup=True)

    def launch(self):
        if not _cron_stopping:
            logger.debug('hard cron invocation')
            crondance(self.path, 'hard', startup = False)

    def run(self):
        s = sched.scheduler(time.time, time.sleep)
        logger.info('Hard cron daemon started')
        while not _cron_stopping:
            now = time.time()
            # schedule the next launch at the next minute boundary
            s.enter(60 - now % 60, 1, self.launch, ())
            s.run()
class softcron(threading.Thread):
    """
    One-shot cron thread for 'soft' mode (started opportunistically by
    the request dispatcher rather than by a timer).
    Note: the constructor itself already runs a startup crondance.
    """

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.path = applications_parent
        # startup pass runs immediately, in the constructing thread
        crondance(self.path, 'soft', startup=True)

    def run(self):
        if not _cron_stopping:
            logger.debug('soft cron invocation')
            crondance(self.path, 'soft', startup=False)
class Token(object):
    """
    File-based inter-process lock around <path>/cron.master used to
    make sure at most one cron pass runs per minute.
    """

    def __init__(self,path):
        self.path = os.path.join(path, 'cron.master')
        # make sure the lock file exists before trying to open it 'rb+'
        if not os.path.exists(self.path):
            fileutils.write_file(self.path, '', 'wb')
        self.master = None
        self.now = time.time()

    def acquire(self,startup=False):
        """
        returns the time when the lock is acquired or
        None if cron already running

        lock is implemented by writing a pickle (start, stop) in cron.master
        start is time when cron job starts and stop is time when cron completed
        stop == 0 if job started but did not yet complete
        if a cron job started within less than 60 seconds, acquire returns None
        if a cron job started before 60 seconds and did not stop,
        a warning is issue "Stale cron.master detected"
        """
        if portalocker.LOCK_EX == None:
            # no file locking available on this platform: cron disabled
            logger.warning('WEB2PY CRON: Disabled because no file locking')
            return None
        self.master = open(self.path,'rb+')
        try:
            ret = None
            portalocker.lock(self.master,portalocker.LOCK_EX)
            try:
                (start, stop) = cPickle.load(self.master)
            except:
                # empty or corrupt file: behave as if an old run completed
                (start, stop) = (0, 1)
            if startup or self.now - start > 59.99:
                ret = self.now
                if not stop:
                    # this happens if previous cron job longer than 1 minute
                    logger.warning('WEB2PY CRON: Stale cron.master detected')
                logger.debug('WEB2PY CRON: Acquiring lock')
                self.master.seek(0)
                # record (start=now, stop=0): job started, not finished
                cPickle.dump((self.now,0),self.master)
        finally:
            portalocker.unlock(self.master)
        if not ret:
            # do this so no need to release
            self.master.close()
        return ret

    def release(self):
        """
        this function writes into cron.master the time when cron job
        was completed
        """
        if not self.master.closed:
            portalocker.lock(self.master,portalocker.LOCK_EX)
            logger.debug('WEB2PY CRON: Releasing cron lock')
            self.master.seek(0)
            (start, stop) = cPickle.load(self.master)
            if start == self.now: # if this is my lock
                self.master.seek(0)
                # record completion time as the stop timestamp
                cPickle.dump((self.now,time.time()),self.master)
            portalocker.unlock(self.master)
            self.master.close()
def rangetolist(s, period='min'):
    """
    Expand a crontab range/step expression like '0-30/5' into the list
    of matching integers.  A leading '*' is first expanded to the full
    span for *period* (minutes, hours, day-of-month, month or
    day-of-week).  Only values that are exact multiples of the step are
    kept; expressions that do not match 'a-b/c' yield an empty list.
    """
    spans = {'min': '0-59', 'hr': '0-23', 'dom': '1-31',
             'mon': '1-12', 'dow': '0-6'}
    if s.startswith('*') and period in spans:
        s = s.replace('*', spans[period], 1)
    match = re.compile(r'(\d+)-(\d+)/(\d+)').match(s)
    if not match:
        return []
    lo, hi, step = (int(match.group(g)) for g in (1, 2, 3))
    return [i for i in range(lo, hi + 1) if i % step == 0]
def parsecronline(line):
    """
    Parse one crontab line into a task dict.

    Supports @reboot/@yearly/@annually/@monthly/@weekly/@daily/
    @midnight/@hourly shortcuts, numeric values, comma-separated lists,
    'a-b' ranges with optional '/step' (via rangetolist) and
    three-letter day-of-week names.

    Returns None when the line has fewer than the 7 required fields
    (min hr dom mon dow user cmd); otherwise a dict with list-of-int
    entries for any non-'*' time field plus 'user' and 'cmd'.
    """
    task = {}
    # translate the @shortcut aliases into standard five-field prefixes
    if line.startswith('@reboot'):
        line = line.replace('@reboot', '-1 * * * *')
    elif line.startswith('@yearly'):
        line = line.replace('@yearly', '0 0 1 1 *')
    elif line.startswith('@annually'):
        line = line.replace('@annually', '0 0 1 1 *')
    elif line.startswith('@monthly'):
        line = line.replace('@monthly', '0 0 1 * *')
    elif line.startswith('@weekly'):
        line = line.replace('@weekly', '0 0 * * 0')
    elif line.startswith('@daily'):
        line = line.replace('@daily', '0 0 * * *')
    elif line.startswith('@midnight'):
        line = line.replace('@midnight', '0 0 * * *')
    elif line.startswith('@hourly'):
        line = line.replace('@hourly', '0 * * * *')
    params = line.strip().split(None, 6)
    if len(params) < 7:
        return None
    daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3,
                  'thu': 4, 'fri': 5, 'sat': 6}
    for (s, id) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']):
        if not s in [None, '*']:
            task[id] = []
            vals = s.split(',')
            for val in vals:
                if val != '-1' and '-' in val and '/' not in val:
                    # a plain 'a-b' range means every value: same as 'a-b/1'
                    val = '%s/1' % val
                if '/' in val:
                    task[id] += rangetolist(val, id)
                elif val.isdigit() or val == '-1':
                    task[id].append(int(val))
                elif id == 'dow' and val[:3].lower() in daysofweek:
                    # BUGFIX: daysofweek is a dict and was being *called*
                    # like a function (daysofweek(...)), which raised
                    # TypeError for every named day; subscript it instead.
                    task[id].append(daysofweek[val[:3].lower()])
    task['user'] = params[5]
    task['cmd'] = params[6]
    return task
class cronlauncher(threading.Thread):
    """
    Thread that runs a single cron command via subprocess and logs
    its outcome.
    """

    def __init__(self, cmd, shell=True):
        threading.Thread.__init__(self)
        if platform.system() == 'Windows':
            # on Windows the command is passed without a shell
            shell = False
        elif isinstance(cmd,list):
            # elsewhere a list command is joined into one shell string
            cmd = ' '.join(cmd)
        self.cmd = cmd
        self.shell = shell

    def run(self):
        import subprocess
        proc = subprocess.Popen(self.cmd,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=self.shell)
        (stdoutdata,stderrdata) = proc.communicate()
        # log combined output as warning on failure, debug on success
        if proc.returncode != 0:
            logger.warning(
                'WEB2PY CRON Call returned code %s:\n%s' % \
                (proc.returncode, stdoutdata+stderrdata))
        else:
            logger.debug('WEB2PY CRON Call returned success:\n%s' \
                % stdoutdata)
def crondance(applications_parent, ctype='soft', startup=False):
    """Run one pass of the web2py cron dispatcher.

    Acquires the cron master Token (returning immediately if another
    process holds it), reads each application's ``cron/crontab`` file,
    filters the parsed tasks against the current local time, and starts
    the matching commands in background ``cronlauncher`` threads.
    With ``startup=True`` the ``@reboot`` tasks (minute == [-1]) are
    also run.  Python 2 code (``except X, e`` syntax, builtin
    ``reduce``).
    """
    apppath = os.path.join(applications_parent,'applications')
    cron_path = os.path.join(apppath,'admin','cron')
    token = Token(cron_path)
    cronmaster = token.acquire(startup=startup)
    if not cronmaster:
        # another process is the cron master right now
        return
    now_s = time.localtime()
    # (field name, current value) pairs used to decide whether a task
    # is due.  (tm_wday+1)%7 shifts time.localtime()'s Monday==0
    # convention to the crontab Sunday==0 convention.
    checks=(('min',now_s.tm_min),
            ('hr',now_s.tm_hour),
            ('mon',now_s.tm_mon),
            ('dom',now_s.tm_mday),
            ('dow',(now_s.tm_wday+1)%7))
    apps = [x for x in os.listdir(apppath)
            if os.path.isdir(os.path.join(apppath, x))]
    for app in apps:
        if _cron_stopping:
            break;
        apath = os.path.join(apppath,app)
        cronpath = os.path.join(apath, 'cron')
        crontab = os.path.join(cronpath, 'crontab')
        if not os.path.exists(crontab):
            continue
        try:
            cronlines = fileutils.readlines_file(crontab, 'rt')
            # drop blank lines and '#' comments before parsing
            lines = [x.strip() for x in cronlines if x.strip() and not x.strip().startswith('#')]
            tasks = [parsecronline(cline) for cline in lines]
        except Exception, e:
            logger.error('WEB2PY CRON: crontab read error %s' % e)
            continue
        for task in tasks:
            if _cron_stopping:
                break;
            commands = [sys.executable]
            w2p_path = fileutils.abspath('web2py.py', gluon=True)
            if os.path.exists(w2p_path):
                commands.append(w2p_path)
            if global_settings.applications_parent != global_settings.gluon_parent:
                commands.extend(('-f', global_settings.applications_parent))
            # citems[i] is True when the task constrains field k but the
            # current time value v is NOT in the allowed list -- i.e. the
            # task is not due on that field.
            citems = [(k in task and not v in task[k]) for k,v in checks]
            task_min= task.get('min',[])
            if not task:
                # unparseable line (parsecronline returned None)
                continue
            elif not startup and task_min == [-1]:
                # @reboot task outside of startup
                continue
            elif task_min != [-1] and reduce(lambda a,b: a or b, citems):
                # at least one time field does not match now
                continue
            logger.info('WEB2PY CRON (%s): %s executing %s in %s at %s' \
                        % (ctype, app, task.get('cmd'),
                           os.getcwd(), datetime.datetime.now()))
            # Command prefixes: '**' runs the script without models,
            # '*' runs it with models imported (-M).
            action, command, models = False, task['cmd'], ''
            if command.startswith('**'):
                (action,models,command) = (True,'',command[2:])
            elif command.startswith('*'):
                (action,models,command) = (True,'-M',command[1:])
            else:
                action=False
            if action and command.endswith('.py'):
                commands.extend(('-J', # cron job
                                 models, # import models?
                                 '-S', app, # app name
                                 '-a', '"<recycle>"', # password
                                 '-R', command)) # command
                shell = True
            elif action:
                commands.extend(('-J', # cron job
                                 models, # import models?
                                 '-S', app+'/'+command, # app name
                                 '-a', '"<recycle>"')) # password
                shell = True
            else:
                # plain shell command taken verbatim from the crontab
                commands = command
                shell = False
            try:
                cronlauncher(commands, shell=shell).start()
            except Exception, e:
                logger.warning(
                    'WEB2PY CRON: Execution error for %s: %s' \
                        % (task.get('cmd'), e))
    token.release()
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of the web2py Web Framework (Copyrighted, 2007-2011).
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Author: Thadeus Burgess
Contributors:
- Thank you to Massimo Di Pierro for creating the original gluon/template.py
- Thank you to Jonathan Lundell for extensively testing the regex on Jython.
- Thank you to Limodou (creator of uliweb) who inspired the block-element support for web2py.
"""
import os
import re
import cgi
import cStringIO
import logging
try:
    from restricted import RestrictedError
except ImportError:
    # Standalone fallback when gluon's restricted module is unavailable:
    # log the details and hand back the RuntimeError class, so that
    # ``raise RestrictedError(a, b, c)`` still raises something sensible.
    # (Narrowed from a bare ``except:`` so real errors inside the
    # restricted module are no longer silently swallowed.)
    def RestrictedError(a, b, c):
        """Log (a, b, c) and return RuntimeError as a stand-in error type."""
        logging.error(str(a) + ':' + str(b) + ':' + str(c))
        return RuntimeError
class Node(object):
    """
    Basic Container Object
    """

    def __init__(self, value=None, pre_extend=False):
        # value: the payload (typically a chunk of template/python code)
        # pre_extend: True when the node was parsed before an {{extend}}
        self.value = value
        self.pre_extend = pre_extend

    def __str__(self):
        """Render the node as the string form of its value."""
        return str(self.value)
class SuperNode(Node):
    """Placeholder for a ``{{super}}`` call.

    ``value`` is filled in with the parent block's content after
    parsing; rendering an unresolved SuperNode fails loudly.
    """

    def __init__(self, name='', pre_extend=False):
        self.name = name
        self.value = None
        self.pre_extend = pre_extend

    def __str__(self):
        if not self.value:
            msg = ("Undefined parent block ``%s``. \n" % self.name
                   + "You must define a block before referencing it.\nMake sure you have not left out an ``{{end}}`` tag." )
            raise SyntaxError(msg)
        return str(self.value)

    def __repr__(self):
        return '{0}->{1}'.format(self.name, self.value)
class BlockNode(Node):
    """
    Container node for a ``{{ block name }}`` ... ``{{ end }}`` region.

    Child nodes render hierarchically, in the order they were appended.
    ie::

        {{ block test }}
            This is default block test
        {{ end }}
    """

    def __init__(self, name='', pre_extend=False, delimiters=('{{', '}}')):
        """
        name - Name of this Node.
        """
        self.nodes = []
        self.name = name
        self.pre_extend = pre_extend
        self.left, self.right = delimiters

    def __repr__(self):
        body = ''.join(str(node) for node in self.nodes)
        return '%sblock %s%s%s%send%s' % (
            self.left, self.name, self.right, body, self.left, self.right)

    def __str__(self):
        """Render only this block's direct, non-block children."""
        return ''.join(str(node) for node in self.nodes
                       if not isinstance(node, BlockNode))

    def append(self, node):
        """Append ``node`` (a ``str`` or ``Node``); reject anything else."""
        if not isinstance(node, (str, Node)):
            raise TypeError("Invalid type; must be instance of ``str`` or ``BlockNode``. %s" % node)
        self.nodes.append(node)

    def extend(self, other):
        """Absorb the child nodes of another BlockNode."""
        if not isinstance(other, BlockNode):
            raise TypeError("Invalid type; must be instance of ``BlockNode``. %s" % other)
        self.nodes.extend(other.nodes)

    def output(self, blocks):
        """Render this block to a single string.

        ``blocks`` maps block names to overriding BlockNodes coming
        from an extending (child) template; a nested block present in
        that mapping is rendered from the override instead of its
        default content.
        """
        rendered = []
        for node in self.nodes:
            if isinstance(node, BlockNode):
                if node.name in blocks:
                    rendered.append(blocks[node.name].output(blocks))
                else:
                    rendered.append(node.output(blocks))
            else:
                rendered.append(str(node))
        return ''.join(rendered)
class Content(BlockNode):
    """
    Root-level BlockNode acting as the parent container for a parsed
    template.  It also keeps a registry (``self.blocks``) of every
    named block appended to it so overrides can be resolved by name.
    """

    def __init__(self, name="ContentBlock", pre_extend=False):
        """
        Keyword Arguments

        name -- Unique name for this BlockNode
        """
        self.name = name
        self.nodes = []
        self.blocks = {}
        self.pre_extend = pre_extend

    def __str__(self):
        rendered = []
        for node in self.nodes:
            if isinstance(node, BlockNode):
                # Prefer an overriding block of the same name, falling
                # back to the node's own default content.
                target = self.blocks.get(node.name, node)
                rendered.append(target.output(self.blocks))
            else:
                rendered.append(str(node))
        return ''.join(rendered)

    def _insert(self, other, index=0):
        """Insert a single ``str`` or ``Node`` at ``index``."""
        if not isinstance(other, (str, Node)):
            raise TypeError("Invalid type, must be instance of ``str`` or ``Node``.")
        self.nodes.insert(index, other)

    def insert(self, other, index=0):
        """Insert one object -- or every item of a list/tuple -- at ``index``."""
        if isinstance(other, (list, tuple)):
            # Reverse (in place) so the original ordering survives the
            # repeated inserts at the same index.
            other.reverse()
            for item in other:
                self._insert(item, index)
        else:
            self._insert(other, index)

    def append(self, node):
        """Append a node; BlockNodes are also registered in ``self.blocks``."""
        if not isinstance(node, (str, Node)):
            raise TypeError("Invalid type, must be instance of ``str`` or ``BlockNode``. %s" % node)
        self.nodes.append(node)
        if isinstance(node, BlockNode):
            self.blocks[node.name] = node

    def extend(self, other):
        """Merge another BlockNode's nodes and block registry into us."""
        if not isinstance(other, BlockNode):
            raise TypeError("Invalid type; must be instance of ``BlockNode``. %s" % other)
        self.nodes.extend(other.nodes)
        self.blocks.update(other.blocks)

    def clear_content(self):
        """Drop all child nodes (used when replacing content on extend)."""
        self.nodes = []
class TemplateParser(object):
    """
    Parse a web2py template -- text mixed with ``{{...}}`` python tags --
    into a tree of Node/BlockNode objects rooted at ``self.content``,
    and via ``str()`` into a single re-indented block of python code.

    Handles the template directives ``=``, ``block``/``end``, ``super``,
    ``include`` and ``extend``, plus any user-supplied ``lexers``.
    """
    # Splits the raw text into alternating non-tag / tag chunks.
    r_tag = re.compile(r'(\{\{.*?\}\})', re.DOTALL)
    # Matches triple-quoted (multiline) string literals inside a tag.
    r_multiline = re.compile(r'(""".*?""")|(\'\'\'.*?\'\'\')', re.DOTALL)
    # These are used for re-indentation.
    # Indent + 1
    re_block = re.compile('^(elif |else:|except:|except |finally:).*$',
                          re.DOTALL)
    # Indent - 1
    re_unblock = re.compile('^(return|continue|break|raise)( .*)?$', re.DOTALL)
    # Indent - 1
    re_pass = re.compile('^pass( .*)?$', re.DOTALL)
    def __init__(self, text,
                 name = "ParserContainer",
                 context = dict(),
                 path = 'views/',
                 writer = 'response.write',
                 lexers = {},
                 delimiters = ('{{','}}'),
                 _super_nodes = [],
                 ):
        """
        text -- text to parse
        context -- context to parse in
        path -- folder path to templates
        writer -- string of writer class to use
        lexers -- dict of custom lexers to use.
        delimiters -- for example ('{{','}}')
        _super_nodes -- a list of nodes to check for inclusion
                        this should only be set by "self.extend"
                        It contains a list of SuperNodes from a child
                        template that need to be handled.
        """
        # Keep a root level name.
        self.name = name
        # Raw text to start parsing.
        self.text = text
        # Writer to use (refer to the default for an example).
        # This will end up as
        # "%s(%s, escape=False)" % (self.writer, value)
        self.writer = writer
        # Dictionary of custom name lexers to use.
        if isinstance(lexers, dict):
            self.lexers = lexers
        else:
            self.lexers = {}
        # Path of templates
        self.path = path
        # Context for templates.
        self.context = context
        # allow optional alternative delimiters
        self.delimiters = delimiters
        if delimiters!=('{{','}}'):
            # rebuild the tag-splitting regex for the custom delimiters
            escaped_delimiters = (re.escape(delimiters[0]),re.escape(delimiters[1]))
            self.r_tag = re.compile(r'(%s.*?%s)' % escaped_delimiters, re.DOTALL)
        # Create a root level Content that everything will go into.
        self.content = Content(name=name)
        # Stack will hold our current stack of nodes.
        # As we descend into a node, it will be added to the stack
        # And when we leave, it will be removed from the stack.
        # self.content should stay on the stack at all times.
        self.stack = [self.content]
        # This variable will hold a reference to every super block
        # that we come across in this template.
        self.super_nodes = []
        # This variable will hold a reference to the child
        # super nodes that need handling.
        self.child_super_nodes = _super_nodes
        # This variable will hold a reference to every block
        # that we come across in this template
        self.blocks = {}
        # Begin parsing.
        self.parse(text)
    def to_string(self):
        """
        Return the parsed template with correct indentation.
        Used to make it easier to port to python3.
        """
        return self.reindent(str(self.content))
    def __str__(self):
        "Make sure str works exactly the same as python 3"
        return self.to_string()
    def __unicode__(self):
        "Make sure str works exactly the same as python 3"
        return self.to_string()
    def reindent(self, text):
        """
        Reindents a string of unindented python code.
        """
        # Get each of our lines into an array.
        lines = text.split('\n')
        # Our new lines
        new_lines = []
        # Keeps track of how many indents we have.
        # Used for when we need to drop a level of indentation
        # only to reindent on the next line.
        credit = 0
        # Current indentation
        k = 0
        #################
        # THINGS TO KNOW
        #################
        # k += 1 means indent
        # k -= 1 means unindent
        # credit = 1 means unindent on the next line.
        for raw_line in lines:
            line = raw_line.strip()
            # ignore empty lines
            if not line:
                continue
            # If we have a line that contains python code that
            # should be unindented for this line of code.
            # and then reindented for the next line.
            if TemplateParser.re_block.match(line):
                k = k + credit - 1
            # We obviously can't have a negative indentation
            k = max(k,0)
            # Add the indentation!
            new_lines.append(' '*(4*k)+line)
            # Bank account back to 0 again :(
            credit = 0
            # If we are a pass block, we obviously de-dent.
            if TemplateParser.re_pass.match(line):
                k -= 1
            # If we are any of the following, de-dent.
            # However, we should stay on the same level
            # But the line right after us will be de-dented.
            # So we add one credit to keep us at the level
            # while moving back one indentation level.
            if TemplateParser.re_unblock.match(line):
                credit = 1
                k -= 1
            # If we are an if statement, a try, or a semi-colon we
            # probably need to indent the next line.
            if line.endswith(':') and not line.startswith('#'):
                k += 1
        # This must come before so that we can raise an error with the
        # right content.
        new_text = '\n'.join(new_lines)
        if k > 0:
            self._raise_error('missing "pass" in view', new_text)
        elif k < 0:
            self._raise_error('too many "pass" in view', new_text)
        return new_text
    def _raise_error(self, message='', text=None):
        """
        Raise an error using itself as the filename and textual content.
        """
        raise RestrictedError(self.name, text or self.text, message)
    def _get_file_text(self, filename):
        """
        Attempt to open ``filename`` and retrieve its text.

        This will use self.path to search for the file.
        """
        # If they didn't specify a filename, how can we find one!
        if not filename.strip():
            self._raise_error('Invalid template filename')
        # Get the filename; filename looks like ``"template.html"``.
        # We need to eval to remove the quotes and get the string type.
        # NOTE(review): eval of a template-supplied expression -- template
        # filenames are treated as trusted input here.
        filename = eval(filename, self.context)
        # Get the path of the file on the system.
        filepath = os.path.join(self.path, filename)
        # try to read the text.
        try:
            fileobj = open(filepath, 'rb')
            text = fileobj.read()
            fileobj.close()
        except IOError:
            self._raise_error('Unable to open included view file: ' + filepath)
        return text
    def include(self, content, filename):
        """
        Include ``filename`` here.
        """
        text = self._get_file_text(filename)
        # Parse the included file with a fresh parser sharing our
        # context/path/writer, then graft its content tree into ours.
        t = TemplateParser(text,
                           name = filename,
                           context = self.context,
                           path = self.path,
                           writer = self.writer,
                           delimiters = self.delimiters)
        content.append(t.content)
    def extend(self, filename):
        """
        Extend ``filename``. Anything not declared in a block defined by the
        parent will be placed in the parent templates ``{{include}}`` block.
        """
        text = self._get_file_text(filename)
        # Create out nodes list to send to the parent
        super_nodes = []
        # We want to include any non-handled nodes.
        super_nodes.extend(self.child_super_nodes)
        # And our nodes as well.
        super_nodes.extend(self.super_nodes)
        t = TemplateParser(text,
                           name = filename,
                           context = self.context,
                           path = self.path,
                           writer = self.writer,
                           delimiters = self.delimiters,
                           _super_nodes = super_nodes)
        # Make a temporary buffer that is unique for parent
        # template.
        buf = BlockNode(name='__include__' + filename, delimiters=self.delimiters)
        pre = []
        # Iterate through each of our nodes
        for node in self.content.nodes:
            # If a node is a block
            if isinstance(node, BlockNode):
                # That happens to be in the parent template
                if node.name in t.content.blocks:
                    # Do not include it
                    continue
            if isinstance(node, Node):
                # Or if the node was before the extension
                # we should not include it
                if node.pre_extend:
                    pre.append(node)
                    continue
                # Otherwise, it should go int the
                # Parent templates {{include}} section.
                buf.append(node)
            else:
                buf.append(node)
        # Clear our current nodes. We will be replacing this with
        # the parent nodes.
        self.content.nodes = []
        # Set our include, unique by filename
        t.content.blocks['__include__' + filename] = buf
        # Make sure our pre_extended nodes go first
        t.content.insert(pre)
        # Then we extend our blocks
        t.content.extend(self.content)
        # Work off the parent node.
        self.content = t.content
    def parse(self, text):
        """Split ``text`` on the tag regex and build the node tree,
        dispatching each ``{{...}}`` tag to the matching directive
        handler (custom lexers, '=', block/end, super, include, extend);
        plain text chunks become writer calls with ``escape=False``."""
        # Basically, r_tag.split will split the text into
        # an array containing, 'non-tag', 'tag', 'non-tag', 'tag'
        # so if we alternate this variable, we know
        # what to look for. This is alternate to
        # line.startswith("{{")
        in_tag = False
        extend = None
        pre_extend = True
        # Use a list to store everything in
        # This is because later the code will "look ahead"
        # for missing strings or brackets.
        ij = self.r_tag.split(text)
        # j = current index
        # i = current item
        for j in range(len(ij)):
            i = ij[j]
            if i:
                if len(self.stack) == 0:
                    self._raise_error('The "end" tag is unmatched, please check if you have a starting "block" tag')
                # Our current element in the stack.
                top = self.stack[-1]
                if in_tag:
                    line = i
                    # If we are missing any strings!!!!
                    # This usually happens with the following example
                    # template code
                    #
                    # {{a = '}}'}}
                    # or
                    # {{a = '}}blahblah{{'}}
                    #
                    # This will fix these
                    # This is commented out because the current template
                    # system has this same limitation. Since this has a
                    # performance hit on larger templates, I do not recommend
                    # using this code on production systems. This is still here
                    # for "i told you it *can* be fixed" purposes.
                    #
                    #
                    # if line.count("'") % 2 != 0 or line.count('"') % 2 != 0:
                    #
                    #    # Look ahead
                    #    la = 1
                    #    nextline = ij[j+la]
                    #
                    #    # As long as we have not found our ending
                    #    # brackets keep going
                    #    while '}}' not in nextline:
                    #        la += 1
                    #        nextline += ij[j+la]
                    #        # clear this line, so we
                    #        # don't attempt to parse it
                    #        # this is why there is an "if i"
                    #        # around line 530
                    #        ij[j+la] = ''
                    #
                    #    # retrieve our index.
                    #    index = nextline.index('}}')
                    #
                    #    # Everything before the new brackets
                    #    before = nextline[:index+2]
                    #
                    #    # Everything after
                    #    after = nextline[index+2:]
                    #
                    #    # Make the next line everything after
                    #    # so it parses correctly, this *should* be
                    #    # all html
                    #    ij[j+1] = after
                    #
                    #    # Add everything before to the current line
                    #    line += before
                    # Get rid of '{{' and '}}'
                    line = line[2:-2].strip()
                    # This is bad juju, but let's do it anyway
                    if not line:
                        continue
                    # We do not want to replace the newlines in code,
                    # only in block comments.
                    def remove_newline(re_val):
                        # Take the entire match and replace newlines with
                        # escaped newlines.
                        return re_val.group(0).replace('\n', '\\n')
                    # Perform block comment escaping.
                    # This performs escaping ON anything
                    # in between """ and """
                    line = re.sub(TemplateParser.r_multiline,
                                  remove_newline,
                                  line)
                    if line.startswith('='):
                        # IE: {{=response.title}}
                        name, value = '=', line[1:].strip()
                    else:
                        v = line.split(' ', 1)
                        if len(v) == 1:
                            # Example
                            # {{ include }}
                            # {{ end }}
                            name = v[0]
                            value = ''
                        else:
                            # Example
                            # {{ block pie }}
                            # {{ include "layout.html" }}
                            # {{ for i in range(10): }}
                            name = v[0]
                            value = v[1]
                    # This will replace newlines in block comments
                    # with the newline character. This is so that they
                    # retain their formatting, but squish down to one
                    # line in the rendered template.
                    # First check if we have any custom lexers
                    if name in self.lexers:
                        # Pass the information to the lexer
                        # and allow it to inject in the environment
                        # You can define custom names such as
                        # '{{<<variable}}' which could potentially
                        # write unescaped version of the variable.
                        self.lexers[name](parser = self,
                                          value = value,
                                          top = top,
                                          stack = self.stack,)
                    elif name == '=':
                        # So we have a variable to insert into
                        # the template
                        buf = "\n%s(%s)" % (self.writer, value)
                        top.append(Node(buf, pre_extend = pre_extend))
                    elif name == 'block' and not value.startswith('='):
                        # Make a new node with name.
                        node = BlockNode(name = value.strip(),
                                         pre_extend = pre_extend,
                                         delimiters = self.delimiters)
                        # Append this node to our active node
                        top.append(node)
                        # Make sure to add the node to the stack.
                        # so anything after this gets added
                        # to this node. This allows us to
                        # "nest" nodes.
                        self.stack.append(node)
                    elif name == 'end' and not value.startswith('='):
                        # We are done with this node.
                        # Save an instance of it
                        self.blocks[top.name] = top
                        # Pop it.
                        self.stack.pop()
                    elif name == 'super' and not value.startswith('='):
                        # Get our correct target name
                        # If they just called {{super}} without a name
                        # attempt to assume the top blocks name.
                        if value:
                            target_node = value
                        else:
                            target_node = top.name
                        # Create a SuperNode instance
                        node = SuperNode(name = target_node,
                                         pre_extend = pre_extend)
                        # Add this to our list to be taken care of
                        self.super_nodes.append(node)
                        # And put in in the tree
                        top.append(node)
                    elif name == 'include' and not value.startswith('='):
                        # If we know the target file to include
                        if value:
                            self.include(top, value)
                        # Otherwise, make a temporary include node
                        # That the child node will know to hook into.
                        else:
                            include_node = BlockNode(name = '__include__' + self.name,
                                                     pre_extend = pre_extend,
                                                     delimiters = self.delimiters)
                            top.append(include_node)
                    elif name == 'extend' and not value.startswith('='):
                        # We need to extend the following
                        # template.
                        extend = value
                        pre_extend = False
                    else:
                        # If we don't know where it belongs
                        # we just add it anyways without formatting.
                        if line and in_tag:
                            # Split on the newlines >.<
                            tokens = line.split('\n')
                            # We need to look for any instances of
                            # for i in range(10):
                            #   = i
                            # pass
                            # So we can properly put a response.write() in place.
                            continuation = False
                            len_parsed = 0
                            for k in range(len(tokens)):
                                tokens[k] = tokens[k].strip()
                                len_parsed += len(tokens[k])
                                if tokens[k].startswith('='):
                                    if tokens[k].endswith('\\'):
                                        continuation = True
                                        tokens[k] = "\n%s(%s" % (self.writer, tokens[k][1:].strip())
                                    else:
                                        tokens[k] = "\n%s(%s)" % (self.writer, tokens[k][1:].strip())
                                elif continuation:
                                    tokens[k] += ')'
                                    continuation = False
                            buf = "\n%s" % '\n'.join(tokens)
                            top.append(Node(buf, pre_extend = pre_extend))
                else:
                    # It is HTML so just include it.
                    buf = "\n%s(%r, escape=False)" % (self.writer, i)
                    top.append(Node(buf, pre_extend = pre_extend))
            # Remember: tag, not tag, tag, not tag
            in_tag = not in_tag
        # Make a list of items to remove from child
        to_rm = []
        # Go through each of the children nodes
        for node in self.child_super_nodes:
            # If we declared a block that this node wants to include
            if node.name in self.blocks:
                # Go ahead and include it!
                node.value = self.blocks[node.name]
                # Since we processed this child, we don't need to
                # pass it along to the parent
                to_rm.append(node)
        # Remove some of the processed nodes
        for node in to_rm:
            # Since this is a pointer, it works beautifully.
            # Sometimes I miss C-Style pointers... I want my asterisk...
            self.child_super_nodes.remove(node)
        # If we need to extend a template.
        if extend:
            self.extend(extend)
# We need this for integration with gluon
def parse_template(filename,
                   path = 'views/',
                   context = dict(),
                   lexers = {},
                   delimiters = ('{{','}}')
                   ):
    """
    filename can be a view filename in the views folder or an input stream
    path is the path of a views folder
    context is a dictionary of symbols used to render the template
    """
    # A string argument names a file under ``path``; anything else is
    # treated as an already-open stream.
    if not isinstance(filename, str):
        text = filename.read()
    else:
        try:
            fp = open(os.path.join(path, filename), 'rb')
            text = fp.read()
            fp.close()
        except IOError:
            raise RestrictedError(filename, '', 'Unable to find the file')
    # Use the file contents to get a parsed template and return it.
    return str(TemplateParser(text, context=context, path=path,
                              lexers=lexers, delimiters=delimiters))
def get_parsed(text):
    """
    Returns the indented python code of text. Useful for unit testing.
    """
    return TemplateParser(text).to_string()
# And this is a generic render function.
# Here for integration with gluon.
def render(content = "hello world",
           stream = None,
           filename = None,
           path = None,
           context = {},
           lexers = {},
           delimiters = ('{{','}}')
           ):
    """
    Render ``content`` (or the template read from ``stream``/``filename``)
    with the symbols in ``context`` and return the resulting string.

    >>> render()
    'hello world'
    >>> render(content='abc')
    'abc'
    >>> render(content='abc\\'')
    "abc'"
    >>> render(content='a"\\'bc')
    'a"\\'bc'
    >>> render(content='a\\nbc')
    'a\\nbc'
    >>> render(content='a"bcd"e')
    'a"bcd"e'
    >>> render(content="'''a\\nc'''")
    "'''a\\nc'''"
    >>> render(content="'''a\\'c'''")
    "'''a\'c'''"
    >>> render(content='{{for i in range(a):}}{{=i}}<br />{{pass}}', context=dict(a=5))
    '0<br />1<br />2<br />3<br />4<br />'
    >>> render(content='{%for i in range(a):%}{%=i%}<br />{%pass%}', context=dict(a=5),delimiters=('{%','%}'))
    '0<br />1<br />2<br />3<br />4<br />'
    >>> render(content="{{='''hello\\nworld'''}}")
    'hello\\nworld'
    >>> render(content='{{for i in range(3):\\n=i\\npass}}')
    '012'
    """
    # Here to avoid circular Imports
    try:
        from globals import Response
    except ImportError:
        # Working standalone. Build a mock Response object.
        # (Narrowed from a bare ``except:`` so unrelated failures inside
        # gluon.globals are not silently masked.)
        class Response():
            def __init__(self):
                self.body = cStringIO.StringIO()
            def write(self, data, escape=True):
                """Append ``data``; HTML-escape it unless ``escape`` is
                False or the object renders itself via ``.xml()``."""
                if not escape:
                    self.body.write(str(data))
                elif hasattr(data,'xml') and callable(data.xml):
                    self.body.write(data.xml())
                else:
                    # make it a string
                    if not isinstance(data, (str, unicode)):
                        data = str(data)
                    elif isinstance(data, unicode):
                        data = data.encode('utf8', 'xmlcharrefreplace')
                    # cgi.escape(quote=True) handles <, >, & and double
                    # quotes only; single quotes must be escaped by hand.
                    # BUGFIX: was ``.replace("'","'")`` -- a no-op that
                    # left single quotes unescaped.
                    data = cgi.escape(data, True).replace("'","&#x27;")
                    self.body.write(data)
    # A little helper to avoid escaping.
    class NOESCAPE():
        def __init__(self, text):
            self.text = text
        def xml(self):
            return self.text
    # Add it to the context so we can use it.
    context['NOESCAPE'] = NOESCAPE
    # If we don't have anything to render, why bother?
    if not content and not stream and not filename:
        # call-style raise works on both Python 2 and 3
        raise SyntaxError("Must specify a stream or filename or content")
    # Here for legacy purposes, probably can be reduced to something more simple.
    close_stream = False
    if not stream:
        if filename:
            stream = open(filename, 'rb')
            close_stream = True
        elif content:
            stream = cStringIO.StringIO(content)
    # Get a response class.
    context['response'] = Response()
    # Execute the template.
    code = str(TemplateParser(stream.read(), context=context, path=path, lexers=lexers, delimiters=delimiters))
    try:
        # the tuple form of exec is valid on both Python 2 and 3
        exec(code, context)
    except Exception:
        # for i,line in enumerate(code.split('\n')): print i,line
        raise
    if close_stream:
        stream.close()
    # Returned the rendered content.
    return context['response'].body.getvalue()
if __name__ == '__main__':
    # Self-test: execute the doctests embedded in this module.
    import doctest
    doctest.testmod()
| Python |
import codecs, encodings
"""Caller will hand this library a buffer and ask it to either convert
it or auto-detect the type.
Based on http://code.activestate.com/recipes/52257/
Licensed under the PSF License
"""
# Maps the first four bytes of a document to an encoding name.
# ``None`` entries match any byte value ("##" in the XML spec).
autodetect_dict = {
    (0x00, 0x00, 0xFE, 0xFF): 'ucs4_be',
    (0xFF, 0xFE, 0x00, 0x00): 'ucs4_le',
    (0xFE, 0xFF, None, None): 'utf_16_be',
    (0xFF, 0xFE, None, None): 'utf_16_le',
    (0x00, 0x3C, 0x00, 0x3F): 'utf_16_be',
    (0x3C, 0x00, 0x3F, 0x00): 'utf_16_le',
    (0x3C, 0x3F, 0x78, 0x6D): 'utf_8',
    (0x4C, 0x6F, 0xA7, 0x94): 'EBCDIC',
}
def autoDetectXMLEncoding(buffer):
    """ buffer -> encoding_name
    The buffer should be at least 4 bytes long.
    Returns None if encoding cannot be detected.
    Note that encoding_name might not have an installed
    decoder (e.g. EBCDIC)

    NOTE(review): Python 2 code -- ``buffer`` is expected to be a byte
    string (``str``); ``map(ord, ...)`` and the decode step below would
    need adapting for Python 3 ``bytes``.  In practice the fallback
    default "utf_8" is returned when nothing can be detected.
    """
    # a more efficient implementation would not decode the whole
    # buffer at once but otherwise we'd have to decode a character at
    # a time looking for the quote character...that's a pain
    encoding = "utf_8" # according to the XML spec, this is the default
    # this code successively tries to refine the default
    # whenever it fails to refine, it falls back to
    # the last place encoding was set.
    if len(buffer)>=4:
        # first try the exact 4-byte signature ...
        bytes = (byte1, byte2, byte3, byte4) = tuple(map(ord, buffer[0:4]))
        enc_info = autodetect_dict.get(bytes, None)
        if not enc_info: # try autodetection again removing potentially
                         # variable bytes
            bytes = (byte1, byte2, None, None)
            enc_info = autodetect_dict.get(bytes)
    else:
        enc_info = None
    if enc_info:
        encoding = enc_info # we've got a guess... these are
        #the new defaults
        # try to find a more precise encoding using xml declaration
        secret_decoder_ring = codecs.lookup(encoding)[1]
        (decoded,length) = secret_decoder_ring(buffer)
        first_line = decoded.split("\n")[0]
        if first_line and first_line.startswith(u"<?xml"):
            encoding_pos = first_line.find(u"encoding")
            if encoding_pos!=-1:
                # look for double quote
                quote_pos=first_line.find('"', encoding_pos)
                if quote_pos==-1: # look for single quote
                    quote_pos=first_line.find("'", encoding_pos)
                if quote_pos>-1:
                    # take everything between the opening quote and the
                    # next matching quote as the declared encoding name
                    quote_char,rest=(first_line[quote_pos],
                                     first_line[quote_pos+1:])
                    encoding=rest[:rest.find(quote_char)]
    return encoding
def decoder(buffer):
    """Auto-detect ``buffer``'s XML encoding, decode it accordingly and
    return the content re-encoded as UTF-8."""
    detected = autoDetectXMLEncoding(buffer)
    return buffer.decode(detected).encode('utf8')
| Python |
# encoding utf-8
__author__ = "Thadeus Burgess <thadeusb@thadeusb.com>"
# we classify as "non-reserved" those key words that are explicitly known
# to the parser but are allowed as column or table names. Some key words
# that are otherwise non-reserved cannot be used as function or data type
# names and are in the nonreserved list. (Most of these words represent
# built-in functions or data types with special syntax. The function
# or type is still available but it cannot be redefined by the user.)
# Labeled "reserved" are those tokens that are not allowed as column or
# table names. Some reserved key words are allowable as names for
# functions or data types.
# Note at the bottom of the list is a dict containing references to the
# tuples, and also if you add a list don't forget to remove its default
# set of COMMON.
# Keywords that are adapter specific. Such as a list of "postgresql"
# or "mysql" keywords
# These are keywords that are common to all SQL dialects, and should
# never be used as a table or column. Even if you use one of these
# the cursor will throw an OperationalError for the SQL syntax.
COMMON = set("""
    SELECT INSERT DELETE UPDATE DROP CREATE ALTER WHERE FROM INNER JOIN
    AND OR LIKE ON IN SET BY GROUP ORDER LEFT OUTER IF END THEN LOOP
    AS ELSE FOR CASE WHEN MIN MAX DISTINCT
    """.split())
# Keywords reserved by PostgreSQL: not allowed as table or column names.
POSTGRESQL = set("""
    FALSE TRUE ALL ANALYSE ANALYZE AND ANY ARRAY AS ASC ASYMMETRIC
    AUTHORIZATION BETWEEN BIGINT BINARY BIT BOOLEAN BOTH CASE CAST CHAR
    CHARACTER CHECK COALESCE COLLATE COLUMN CONSTRAINT CREATE CROSS
    CURRENT_CATALOG CURRENT_DATE CURRENT_ROLE CURRENT_SCHEMA CURRENT_TIME
    CURRENT_TIMESTAMP CURRENT_USER DEC DECIMAL DEFAULT DEFERRABLE DESC
    DISTINCT DO ELSE END EXCEPT EXISTS EXTRACT FETCH FLOAT FOR FOREIGN
    FREEZE FROM FULL GRANT GREATEST GROUP HAVING ILIKE IN INITIALLY INNER
    INOUT INT INTEGER INTERSECT INTERVAL INTO IS ISNULL JOIN LEADING LEAST
    LEFT LIKE LIMIT LOCALTIME LOCALTIMESTAMP NATIONAL NATURAL NCHAR NEW
    NONE NOT NOTNULL NULL NULLIF NUMERIC OFF OFFSET OLD ON ONLY OR ORDER
    OUT OUTER OVERLAPS OVERLAY PLACING POSITION PRECISION PRIMARY REAL
    REFERENCES RETURNING RIGHT ROW SELECT SESSION_USER SETOF SIMILAR
    SMALLINT SOME SUBSTRING SYMMETRIC TABLE THEN TIME TIMESTAMP TO
    TRAILING TREAT TRIM UNION UNIQUE USER USING VALUES VARCHAR VARIADIC
    VERBOSE WHEN WHERE WITH XMLATTRIBUTES XMLCONCAT XMLELEMENT XMLFOREST
    XMLPARSE XMLPI XMLROOT XMLSERIALIZE
    """.split())
# PostgreSQL words that are *non-reserved*: recognized keywords that can
# still be used as identifiers in at least some contexts.  Kept as one
# whitespace-separated literal and split into a set of uppercase strings.
# Entries commented out in the original list (and therefore still excluded
# here): ID, NAME, PASSWORD, ROLE, TABLE_NAME — presumably left out so
# those very common column names remain usable; confirm before re-adding.
POSTGRESQL_NONRESERVED = set("""
    A ABORT ABS ABSENT ABSOLUTE ACCESS ACCORDING ACTION ADA ADD ADMIN AFTER
    AGGREGATE ALIAS ALLOCATE ALSO ALTER ALWAYS ARE ARRAY_AGG ASENSITIVE
    ASSERTION ASSIGNMENT AT ATOMIC ATTRIBUTE ATTRIBUTES AVG
    BACKWARD BASE64 BEFORE BEGIN BERNOULLI BIT_LENGTH BITVAR BLOB BOM
    BREADTH BY
    C CACHE CALL CALLED CARDINALITY CASCADE CASCADED CATALOG CATALOG_NAME
    CEIL CEILING CHAIN CHAR_LENGTH CHARACTER_LENGTH CHARACTER_SET_CATALOG
    CHARACTER_SET_NAME CHARACTER_SET_SCHEMA CHARACTERISTICS CHARACTERS
    CHECKED CHECKPOINT CLASS CLASS_ORIGIN CLOB CLOSE CLUSTER COBOL COLLATION
    COLLATION_CATALOG COLLATION_NAME COLLATION_SCHEMA COLLECT COLUMN_NAME
    COLUMNS COMMAND_FUNCTION COMMAND_FUNCTION_CODE COMMENT COMMIT COMMITTED
    COMPLETION CONCURRENTLY CONDITION CONDITION_NUMBER CONFIGURATION CONNECT
    CONNECTION CONNECTION_NAME CONSTRAINT_CATALOG CONSTRAINT_NAME
    CONSTRAINT_SCHEMA CONSTRAINTS CONSTRUCTOR CONTAINS CONTENT CONTINUE
    CONVERSION CONVERT COPY CORR CORRESPONDING COST COUNT COVAR_POP
    COVAR_SAMP CREATEDB CREATEROLE CREATEUSER CSV CUBE CUME_DIST CURRENT
    CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH
    CURRENT_TRANSFORM_GROUP_FOR_TYPE CURSOR CURSOR_NAME CYCLE
    DATA DATABASE DATE DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION
    DAY DEALLOCATE DECLARE DEFAULTS DEFERRED DEFINED DEFINER DEGREE DELETE
    DELIMITER DELIMITERS DENSE_RANK DEPTH DEREF DERIVED DESCRIBE DESCRIPTOR
    DESTROY DESTRUCTOR DETERMINISTIC DIAGNOSTICS DICTIONARY DISABLE DISCARD
    DISCONNECT DISPATCH DOCUMENT DOMAIN DOUBLE DROP DYNAMIC DYNAMIC_FUNCTION
    DYNAMIC_FUNCTION_CODE
    EACH ELEMENT EMPTY ENABLE ENCODING ENCRYPTED END-EXEC ENUM EQUALS ESCAPE
    EVERY EXCEPTION EXCLUDE EXCLUDING EXCLUSIVE EXEC EXECUTE EXISTING EXP
    EXPLAIN EXTERNAL
    FAMILY FILTER FINAL FIRST FIRST_VALUE FLAG FLOOR FOLLOWING FORCE FORTRAN
    FORWARD FOUND FREE FUNCTION FUSION
    G GENERAL GENERATED GET GLOBAL GO GOTO GRANTED GROUPING
    HANDLER HEADER HEX HIERARCHY HOLD HOST HOUR
    IDENTITY IF IGNORE IMMEDIATE IMMUTABLE IMPLEMENTATION IMPLICIT INCLUDING
    INCREMENT INDENT INDEX INDEXES INDICATOR INFIX INHERIT INHERITS
    INITIALIZE INPUT INSENSITIVE INSERT INSTANCE INSTANTIABLE INSTEAD
    INTERSECTION INVOKER ISOLATION ITERATE
    K KEY KEY_MEMBER KEY_TYPE
    LAG LANCOMPILER LANGUAGE LARGE LAST LAST_VALUE LATERAL LC_COLLATE
    LC_CTYPE LEAD LENGTH LESS LEVEL LIKE_REGEX LISTEN LN LOAD LOCAL LOCATION
    LOCATOR LOCK LOGIN LOWER
    M MAP MAPPING MATCH MATCHED MAX MAX_CARDINALITY MAXVALUE MEMBER MERGE
    MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT METHOD MIN MINUTE
    MINVALUE MOD MODE MODIFIES MODIFY MODULE MONTH MORE MOVE MULTISET MUMPS
    NAMES NAMESPACE NCLOB NESTING NEXT NFC NFD NFKC NFKD NIL NO NOCREATEDB
    NOCREATEROLE NOCREATEUSER NOINHERIT NOLOGIN NORMALIZE NORMALIZED
    NOSUPERUSER NOTHING NOTIFY NOWAIT NTH_VALUE NTILE NULLABLE NULLS NUMBER
    OBJECT OCCURRENCES_REGEX OCTET_LENGTH OCTETS OF OIDS OPEN OPERATION
    OPERATOR OPTION OPTIONS ORDERING ORDINALITY OTHERS OUTPUT OVER
    OVERRIDING OWNED OWNER
    P PAD PARAMETER PARAMETER_MODE PARAMETER_NAME PARAMETER_ORDINAL_POSITION
    PARAMETER_SPECIFIC_CATALOG PARAMETER_SPECIFIC_NAME
    PARAMETER_SPECIFIC_SCHEMA PARAMETERS PARSER PARTIAL PARTITION PASCAL
    PASSING PATH PERCENT_RANK PERCENTILE_CONT PERCENTILE_DISC PLANS PLI
    POSITION_REGEX POSTFIX POWER PRECEDING PREFIX PREORDER PREPARE PREPARED
    PRESERVE PRIOR PRIVILEGES PROCEDURAL PROCEDURE PUBLIC
    QUOTE
    RANGE RANK READ READS REASSIGN RECHECK RECURSIVE REF REFERENCING
    REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE
    REGR_SXX REGR_SXY REGR_SYY REINDEX RELATIVE RELEASE RENAME REPEATABLE
    REPLACE REPLICA RESET RESPECT RESTART RESTRICT RESULT RETURN
    RETURNED_CARDINALITY RETURNED_LENGTH RETURNED_OCTET_LENGTH
    RETURNED_SQLSTATE RETURNS REVOKE ROLLBACK ROLLUP ROUTINE ROUTINE_CATALOG
    ROUTINE_NAME ROUTINE_SCHEMA ROW_COUNT ROW_NUMBER ROWS RULE
    SAVEPOINT SCALE SCHEMA SCHEMA_NAME SCOPE SCOPE_CATALOG SCOPE_NAME
    SCOPE_SCHEMA SCROLL SEARCH SECOND SECTION SECURITY SELF SENSITIVE
    SEQUENCE SERIALIZABLE SERVER SERVER_NAME SESSION SET SETS SHARE SHOW
    SIMPLE SIZE SOURCE SPACE SPECIFIC SPECIFIC_NAME SPECIFICTYPE SQL SQLCODE
    SQLERROR SQLEXCEPTION SQLSTATE SQLWARNING SQRT STABLE STANDALONE START
    STATE STATEMENT STATIC STATISTICS STDDEV_POP STDDEV_SAMP STDIN STDOUT
    STORAGE STRICT STRIP STRUCTURE STYLE SUBCLASS_ORIGIN SUBLIST SUBMULTISET
    SUBSTRING_REGEX SUM SUPERUSER SYSID SYSTEM SYSTEM_USER
    T TABLESAMPLE TABLESPACE TEMP TEMPLATE TEMPORARY TERMINATE TEXT THAN
    TIES TIMEZONE_HOUR TIMEZONE_MINUTE TOP_LEVEL_COUNT TRANSACTION
    TRANSACTION_ACTIVE TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK
    TRANSFORM TRANSFORMS TRANSLATE TRANSLATE_REGEX TRANSLATION TRIGGER
    TRIGGER_CATALOG TRIGGER_NAME TRIGGER_SCHEMA TRIM_ARRAY TRUNCATE TRUSTED
    TYPE
    UESCAPE UNBOUNDED UNCOMMITTED UNDER UNENCRYPTED UNKNOWN UNLISTEN UNNAMED
    UNNEST UNTIL UNTYPED UPDATE UPPER URI USAGE USER_DEFINED_TYPE_CATALOG
    USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA
    VACUUM VALID VALIDATOR VALUE VAR_POP VAR_SAMP VARBINARY VARIABLE VARYING
    VERSION VIEW VOLATILE
    WHENEVER WHITESPACE WIDTH_BUCKET WINDOW WITHIN WITHOUT WORK WRAPPER
    WRITE
    XML XMLAGG XMLBINARY XMLCAST XMLCOMMENT XMLDECLARATION XMLDOCUMENT
    XMLEXISTS XMLITERATE XMLNAMESPACES XMLQUERY XMLSCHEMA XMLTABLE XMLTEXT
    XMLVALIDATE
    YEAR YES ZONE
""".split())
# Thanks villas
# Firebird keyword list, kept as one whitespace-separated literal and split
# into a set of uppercase strings.
# Commented out in the original list (and therefore still excluded here):
# PASSWORD, ROLE — presumably so those common column names stay usable;
# confirm before re-adding.
FIREBIRD = set("""
    ABS ACTIVE ADMIN AFTER ASCENDING AUTO AUTODDL
    BASED BASENAME BASE_NAME BEFORE BIT_LENGTH BLOB BLOBEDIT BOOLEAN BOTH
    BUFFER
    CACHE CHAR_LENGTH CHARACTER_LENGTH CHECK_POINT_LEN CHECK_POINT_LENGTH
    CLOSE COMMITTED COMPILETIME COMPUTED CONDITIONAL CONNECT CONTAINING
    CROSS CSTRING CURRENT_CONNECTION CURRENT_ROLE CURRENT_TRANSACTION
    CURRENT_USER
    DATABASE DB_KEY DEBUG DESCENDING DISCONNECT DISPLAY DO
    ECHO EDIT ENTRY_POINT EVENT EXIT EXTERN
    FALSE FETCH FILE FILTER FREE_IT FUNCTION
    GDSCODE GENERATOR GEN_ID GLOBAL GROUP_COMMIT_WAIT GROUP_COMMIT_WAIT_TIME
    HELP
    IF INACTIVE INDEX INIT INPUT_TYPE INSENSITIVE ISQL
    LC_MESSAGES LC_TYPE LEADING LENGTH LEV LOGFILE LOG_BUFFER_SIZE
    LOG_BUF_SIZE LONG LOWER
    MANUAL MAXIMUM MAXIMUM_SEGMENT MAX_SEGMENT MERGE MESSAGE MINIMUM
    MODULE_NAME
    NOAUTO NUM_LOG_BUFS NUM_LOG_BUFFERS
    OCTET_LENGTH OPEN OUTPUT_TYPE OVERFLOW
    PAGE PAGELENGTH PAGES PAGE_SIZE PARAMETER PLAN POST_EVENT
    QUIT
    RAW_PARTITIONS RDB$DB_KEY RECORD_VERSION RECREATE RECURSIVE RELEASE
    RESERV RESERVING RETAIN RETURN RETURNING_VALUES RETURNS ROW_COUNT ROWS
    RUNTIME
    SAVEPOINT SECOND SENSITIVE SHADOW SHARED SHELL SHOW SINGULAR SNAPSHOT
    SORT STABILITY START STARTING STARTS STATEMENT STATIC STATISTICS
    SUB_TYPE SUSPEND
    TERMINATOR TRAILING TRIGGER TRIM TRUE TYPE
    UNCOMMITTED UNKNOWN USING
    VARIABLE VERSION
    WAIT WEEKDAY WHILE
    YEARDAY
""".split())
# Firebird non-reserved words: keywords that may still be used as
# identifiers.  The original list repeated eleven entries (BACKUP, BLOCK,
# COLLATION, COMMENT, DIFFERENCE, IIF, NEXT, RESTART, RETURNING,
# SCALAR_ARRAY, SEQUENCE); the literal below is deduplicated and
# alphabetized — the resulting set is unchanged (80 distinct words).
FIREBIRD_NONRESERVED = set("""
    ABS ACCENT ACOS ALWAYS ASCII_CHAR ASCII_VAL ASIN ATAN ATAN2
    BACKUP BIN_AND BIN_OR BIN_SHL BIN_SHR BIN_XOR BLOCK
    CEIL CEILING COALESCE COLLATION COMMENT COS COSH COT
    DATEADD DATEDIFF DECODE DELETING DIFFERENCE
    EXP FLOOR GENERATED GEN_UUID HASH IIF INSERTING
    LAST LEAVE LIST LN LOCK LOG LOG10 LPAD
    MATCHED MATCHING MAXVALUE MILLISECOND MINVALUE MOD
    NEXT NULLIF NULLS OVERLAY
    PAD PI PLACING POWER PRESERVE
    RAND REPLACE RESTART RETURNING REVERSE ROUND RPAD
    SCALAR_ARRAY SEQUENCE SIGN SIN SINH SPACE SQRT STATEMENT
    TAN TANH TEMPORARY TRUNC UPDATING WEEK
""".split())
# Thanks Jonathan Lundell
# MySQL reserved words.  The original list repeated five entries
# (IGNORE_SERVER_IDS, MASTER_HEARTBEAT_PERIOD, MAXVALUE, RESIGNAL, SIGNAL);
# each now appears once — the resulting set is unchanged (228 distinct).
MYSQL = set("""
    ACCESSIBLE ADD ALL ALTER ANALYZE AND AS ASC ASENSITIVE
    BEFORE BETWEEN BIGINT BINARY BLOB BOTH BY
    CALL CASCADE CASE CHANGE CHAR CHARACTER CHECK COLLATE COLUMN CONDITION
    CONSTRAINT CONTINUE CONVERT CREATE CROSS CURRENT_DATE CURRENT_TIME
    CURRENT_TIMESTAMP CURRENT_USER CURSOR
    DATABASE DATABASES DAY_HOUR DAY_MICROSECOND DAY_MINUTE DAY_SECOND DEC
    DECIMAL DECLARE DEFAULT DELAYED DELETE DESC DESCRIBE DETERMINISTIC
    DISTINCT DISTINCTROW DIV DOUBLE DROP DUAL
    EACH ELSE ELSEIF ENCLOSED ESCAPED EXISTS EXIT EXPLAIN
    FALSE FETCH FLOAT FLOAT4 FLOAT8 FOR FORCE FOREIGN FROM FULLTEXT
    GRANT GROUP
    HAVING HIGH_PRIORITY HOUR_MICROSECOND HOUR_MINUTE HOUR_SECOND
    IF IGNORE IGNORE_SERVER_IDS IN INDEX INFILE INNER INOUT INSENSITIVE
    INSERT INT INT1 INT2 INT3 INT4 INT8 INTEGER INTERVAL INTO IS ITERATE
    JOIN
    KEY KEYS KILL
    LEADING LEAVE LEFT LIKE LIMIT LINEAR LINES LOAD LOCALTIME
    LOCALTIMESTAMP LOCK LONG LONGBLOB LONGTEXT LOOP LOW_PRIORITY
    MASTER_HEARTBEAT_PERIOD MASTER_SSL_VERIFY_SERVER_CERT MATCH MAXVALUE
    MEDIUMBLOB MEDIUMINT MEDIUMTEXT MIDDLEINT MINUTE_MICROSECOND
    MINUTE_SECOND MOD MODIFIES
    NATURAL NO_WRITE_TO_BINLOG NOT NULL NUMERIC
    ON OPTIMIZE OPTION OPTIONALLY OR ORDER OUT OUTER OUTFILE
    PRECISION PRIMARY PROCEDURE PURGE
    RANGE READ READ_WRITE READS REAL REFERENCES REGEXP RELEASE RENAME
    REPEAT REPLACE REQUIRE RESIGNAL RESTRICT RETURN REVOKE RIGHT RLIKE
    SCHEMA SCHEMAS SECOND_MICROSECOND SELECT SENSITIVE SEPARATOR SET SHOW
    SIGNAL SMALLINT SPATIAL SPECIFIC SQL SQL_BIG_RESULT SQL_CALC_FOUND_ROWS
    SQL_SMALL_RESULT SQLEXCEPTION SQLSTATE SQLWARNING SSL STARTING
    STRAIGHT_JOIN
    TABLE TERMINATED THEN TINYBLOB TINYINT TINYTEXT TO TRAILING TRIGGER
    TRUE
    UNDO UNION UNIQUE UNLOCK UNSIGNED UPDATE USAGE USE USING UTC_DATE
    UTC_TIME UTC_TIMESTAMP
    VALUES VARBINARY VARCHAR VARCHARACTER VARYING
    WHEN WHERE WHILE WITH WRITE
    XOR YEAR_MONTH ZEROFILL
""".split())
# Microsoft SQL Server reserved words.  Fixes the original "'NATIONAL '"
# entry, which carried a trailing space and therefore could never match an
# identifier named NATIONAL (T-SQL reserves NATIONAL, no trailing space).
MSSQL = set("""
    ADD ALL ALTER AND ANY AS ASC AUTHORIZATION
    BACKUP BEGIN BETWEEN BREAK BROWSE BULK BY
    CASCADE CASE CHECK CHECKPOINT CLOSE CLUSTERED COALESCE COLLATE COLUMN
    COMMIT COMPUTE CONSTRAINT CONTAINS CONTAINSTABLE CONTINUE CONVERT
    CREATE CROSS CURRENT CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP
    CURRENT_USER CURSOR
    DATABASE DBCC DEALLOCATE DECLARE DEFAULT DELETE DENY DESC DISK DISTINCT
    DISTRIBUTED DOUBLE DROP DUMMY DUMP
    ELSE END ERRLVL ESCAPE EXCEPT EXEC EXECUTE EXISTS EXIT
    FETCH FILE FILLFACTOR FOR FOREIGN FREETEXT FREETEXTTABLE FROM FULL
    FUNCTION
    GOTO GRANT GROUP
    HAVING HOLDLOCK
    IDENTITY IDENTITY_INSERT IDENTITYCOL IF IN INDEX INNER INSERT INTERSECT
    INTO IS
    JOIN
    KEY KILL
    LEFT LIKE LINENO LOAD
    NATIONAL NOCHECK NONCLUSTERED NOT NULL NULLIF
    OF OFF OFFSETS ON OPEN OPENDATASOURCE OPENQUERY OPENROWSET OPENXML
    OPTION OR ORDER OUTER OVER
    PERCENT PLAN PRECISION PRIMARY PRINT PROC PROCEDURE PUBLIC
    RAISERROR READ READTEXT RECONFIGURE REFERENCES REPLICATION RESTORE
    RESTRICT RETURN REVOKE RIGHT ROLLBACK ROWCOUNT ROWGUIDCOL RULE
    SAVE SCHEMA SELECT SESSION_USER SET SETUSER SHUTDOWN SOME STATISTICS
    SYSTEM_USER
    TABLE TEXTSIZE THEN TO TOP TRAN TRANSACTION TRIGGER TRUNCATE TSEQUAL
    UNION UNIQUE UPDATE UPDATETEXT USE USER
    VALUES VARYING VIEW
    WAITFOR WHEN WHERE WHILE WITH WRITETEXT
""".split())
# Oracle reserved words, kept as one whitespace-separated literal and
# split into a set of uppercase strings.
ORACLE = set("""
    ACCESS ADD ALL ALTER AND ANY AS ASC AUDIT
    BETWEEN BY
    CHAR CHECK CLUSTER COLUMN COMMENT COMPRESS CONNECT CREATE CURRENT
    DATE DECIMAL DEFAULT DELETE DESC DISTINCT DROP
    ELSE EXCLUSIVE EXISTS
    FILE FLOAT FOR FROM
    GRANT GROUP
    HAVING
    IDENTIFIED IMMEDIATE IN INCREMENT INDEX INITIAL INSERT INTEGER
    INTERSECT INTO IS
    LEVEL LIKE LOCK LONG
    MAXEXTENTS MINUS MLSLABEL MODE MODIFY
    NOAUDIT NOCOMPRESS NOT NOWAIT NULL NUMBER
    OF OFFLINE ON ONLINE OPTION OR ORDER
    PCTFREE PRIOR PRIVILEGES PUBLIC
    RAW RENAME RESOURCE REVOKE ROW ROWID ROWNUM ROWS
    SELECT SESSION SET SHARE SIZE SMALLINT START SUCCESSFUL SYNONYM SYSDATE
    TABLE THEN TO TRIGGER
    UID UNION UNIQUE UPDATE USER
    VALIDATE VALUES VARCHAR VARCHAR2 VIEW
    WHENEVER WHERE WITH
""".split())
# SQLite reserved words, kept as one whitespace-separated literal and
# split into a set of uppercase strings.
SQLITE = set("""
    ABORT ACTION ADD AFTER ALL ALTER ANALYZE AND AS ASC ATTACH
    AUTOINCREMENT
    BEFORE BEGIN BETWEEN BY
    CASCADE CASE CAST CHECK COLLATE COLUMN COMMIT CONFLICT CONSTRAINT
    CREATE CROSS CURRENT_DATE CURRENT_TIME CURRENT_TIMESTAMP
    DATABASE DEFAULT DEFERRABLE DEFERRED DELETE DESC DETACH DISTINCT DROP
    EACH ELSE END ESCAPE EXCEPT EXCLUSIVE EXISTS EXPLAIN
    FAIL FOR FOREIGN FROM FULL
    GLOB GROUP
    HAVING
    IF IGNORE IMMEDIATE IN INDEX INDEXED INITIALLY INNER INSERT INSTEAD
    INTERSECT INTO IS ISNULL
    JOIN
    KEY
    LEFT LIKE LIMIT
    MATCH
    NATURAL NO NOT NOTNULL NULL
    OF OFFSET ON OR ORDER OUTER
    PLAN PRAGMA PRIMARY
    QUERY
    RAISE REFERENCES REGEXP REINDEX RELEASE RENAME REPLACE RESTRICT RIGHT
    ROLLBACK ROW
    SAVEPOINT SELECT SET
    TABLE TEMP TEMPORARY THEN TO TRANSACTION TRIGGER
    UNION UNIQUE UPDATE USING
    VACUUM VALUES VIEW VIRTUAL
    WHEN WHERE
""".split())
# remove from here when you add a list.
# Backends without a curated list of their own reuse an existing one.
JDBCSQLITE = SQLITE
DB2 = INFORMIX = INGRES = JDBCPOSTGRESQL = COMMON

# Maps adapter (connection-scheme) names to the reserved-word set used to
# check identifiers for that backend.
ADAPTERS = {
    'sqlite': SQLITE,
    'mysql': MYSQL,
    'postgres': POSTGRESQL,
    'postgres_nonreserved': POSTGRESQL_NONRESERVED,
    'oracle': ORACLE,
    'mssql': MSSQL,
    'mssql2': MSSQL,
    'db2': DB2,
    'informix': INFORMIX,
    'firebird': FIREBIRD,
    'firebird_embedded': FIREBIRD,
    'firebird_nonreserved': FIREBIRD_NONRESERVED,
    'ingres': INGRES,
    'ingresu': INGRES,
    'jdbc:sqlite': JDBCSQLITE,
    'jdbc:postgres': JDBCPOSTGRESQL,
    'common': COMMON,
}
# 'all' is the union of every adapter's keywords.  set().union(*values)
# works on both Python 2 and Python 3; the original used the bare `reduce`
# builtin, which does not exist in Python 3 (it moved to functools.reduce).
ADAPTERS['all'] = set().union(*ADAPTERS.values())
# NOTE(review): the lines below are not Python — they are dataset-viewer
# residue (a language column and UI text) accidentally appended to this
# module.  Preserved here as a comment so the module remains importable.
# | Python
# Subsets and Splits
# SQL Console for ajibawa-2023/Python-Code-Large
# Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.