code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import pyloggingconf
from .debugfile import DebugFile
from .listeners import Listeners
from .logger import LOGGER
from .loggerhelper import AbstractLogger
from .xmllogger import XmlLogger
class Output(AbstractLogger):
    """Top level output object forwarding execution events to loggers.

    Owns the XML output logger and hooks listeners and the optional debug
    file into the global LOGGER. Most methods simply delegate to LOGGER.
    """

    def __init__(self, settings):
        AbstractLogger.__init__(self)
        self._xmllogger = XmlLogger(settings['Output'], settings['LogLevel'])
        self._register_loggers(settings['Listeners'], settings['DebugFile'])
        self._settings = settings

    def _register_loggers(self, listeners, debugfile):
        # The XML logger must be notified of context changes (suite/test/kw).
        LOGGER.register_context_changing_logger(self._xmllogger)
        for logger in (Listeners(listeners), DebugFile(debugfile)):
            if logger:
                LOGGER.register_logger(logger)
        LOGGER.disable_message_cache()

    def close(self, result):
        """Finalize the XML output and announce the finished output file."""
        self._xmllogger.visit_statistics(result.statistics)
        self._xmllogger.close()
        LOGGER.unregister_logger(self._xmllogger)
        LOGGER.output_file('Output', self._settings['Output'])

    def start_suite(self, suite):
        LOGGER.start_suite(suite)

    def end_suite(self, suite):
        LOGGER.end_suite(suite)

    def start_test(self, test):
        LOGGER.start_test(test)

    def end_test(self, test):
        LOGGER.end_test(test)

    def start_keyword(self, kw):
        LOGGER.start_keyword(kw)

    def end_keyword(self, kw):
        LOGGER.end_keyword(kw)

    def message(self, msg):
        LOGGER.log_message(msg)

    def set_log_level(self, level):
        """Change the logging threshold; returns the previous level."""
        pyloggingconf.set_level(level)
        return self._xmllogger.set_log_level(level)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot import utils
from .highlighting import AnsiHighlighter, Highlighter, NoHighlighting
from .loggerhelper import IsLogged
class CommandLineMonitor(object):
    """Reports execution progress on the console.

    Suite/test progress goes through CommandLineWriter; of log messages
    only warnings and above are shown.
    """

    def __init__(self, width=78, colors='AUTO', markers='AUTO', stdout=None,
                 stderr=None):
        self._writer = CommandLineWriter(width, colors, markers, stdout, stderr)
        self._is_logged = IsLogged('WARN')
        self._started = False
        self._started_keywords = 0
        self._running_test = False

    def start_suite(self, suite):
        if not self._started:
            # Extra separator above the very first (top level) suite.
            self._writer.suite_separator()
            self._started = True
        self._writer.info(suite.longname, suite.doc, start_suite=True)
        self._writer.suite_separator()

    def end_suite(self, suite):
        self._writer.info(suite.longname, suite.doc)
        self._writer.status(suite.status)
        self._writer.message(suite.full_message)
        self._writer.suite_separator()

    def start_test(self, test):
        self._writer.info(test.name, test.doc)
        self._running_test = True

    def end_test(self, test):
        self._writer.status(test.status, clear=True)
        self._writer.message(test.message)
        self._writer.test_separator()
        self._running_test = False

    def start_keyword(self, kw):
        self._started_keywords += 1

    def end_keyword(self, kw):
        self._started_keywords -= 1
        # Write a progress marker only for top level keywords of a test.
        if self._running_test and self._started_keywords == 0:
            self._writer.keyword_marker(kw)

    def message(self, msg):
        if self._is_logged(msg.level):
            self._writer.error(msg.message, msg.level, clear=self._running_test)

    def output_file(self, name, path):
        self._writer.output(name, path)
class CommandLineWriter(object):
    """Does the actual console writing for CommandLineMonitor."""

    # Width reserved for the trailing '| PASS |' style status box.
    _status_length = len('| PASS |')

    def __init__(self, width=78, colors='AUTO', markers='AUTO', stdout=None,
                 stderr=None):
        self._width = width
        self._stdout = stdout or sys.__stdout__
        self._stderr = stderr or sys.__stderr__
        self._highlighter = StatusHighlighter(colors, self._stdout, self._stderr)
        self._keyword_marker = KeywordMarker(markers, self._stdout, self._highlighter)
        # Last written suite/test info line, remembered so it can be
        # re-written after keyword markers on the same line are cleared.
        self._last_info = None

    def info(self, name, doc, start_suite=False):
        """Write the name (and first doc line) of a starting suite/test."""
        width, separator = self._get_info_width_and_separator(start_suite)
        self._last_info = self._get_info(name, doc, width) + separator
        self._write(self._last_info, newline=False)
        self._keyword_marker.reset_count()

    def _get_info_width_and_separator(self, start_suite):
        if start_suite:
            return self._width, '\n'
        # Leave room for the status box and one separating space.
        return self._width - self._status_length - 1, ' '

    def _get_info(self, name, doc, width):
        # If the name alone does not fit, show only the (truncated) name.
        if utils.get_console_length(name) > width:
            return utils.pad_console_length(name, width)
        info = name if not doc else '%s :: %s' % (name, doc.splitlines()[0])
        return utils.pad_console_length(info, width)

    def suite_separator(self):
        self._fill('=')

    def test_separator(self):
        self._fill('-')

    def _fill(self, char):
        self._write(char * self._width)

    def status(self, status, clear=False):
        # Clear possible keyword markers before writing the final status.
        if self._should_clear_markers(clear):
            self._clear_status()
        self._highlight('| ', status, ' |')

    def _should_clear_markers(self, clear):
        return clear and self._keyword_marker.marking_enabled

    def _clear_status(self):
        self._clear_info_line()
        self._rewrite_info()

    def _clear_info_line(self):
        # Overwrite the current line with spaces and return to column 0.
        self._write('\r' + ' ' * self._width + '\r', newline=False)
        self._keyword_marker.reset_count()

    def _rewrite_info(self):
        self._write(self._last_info, newline=False)

    def message(self, message):
        if message:
            self._write(message.strip())

    def keyword_marker(self, kw):
        # When markers would overflow into the status area, start over.
        if self._keyword_marker.marker_count == self._status_length:
            self._clear_status()
            self._keyword_marker.reset_count()
        self._keyword_marker.mark(kw)

    def error(self, message, level, clear=False):
        # Errors may interrupt an in-progress info line; restore it after.
        if self._should_clear_markers(clear):
            self._clear_info_line()
        self._highlight('[ ', level, ' ] ' + message, error=True)
        if self._should_clear_markers(clear):
            self._rewrite_info()

    def output(self, name, path):
        self._write('%-8s %s' % (name+':', path))

    def _write(self, text, newline=True, error=False):
        stream = self._stdout if not error else self._stderr
        if newline:
            text += '\n'
        stream.write(utils.encode_output(text))
        stream.flush()

    def _highlight(self, before, status, after, newline=True, error=False):
        # Only the status word itself is colored, not the surrounding text.
        stream = self._stdout if not error else self._stderr
        self._write(before, newline=False, error=error)
        self._highlighter.highlight_status(status, stream)
        self._write(after, newline=newline, error=error)
class StatusHighlighter(object):
    """Highlights status words and marker characters on given streams."""

    def __init__(self, colors, *streams):
        self._highlighters = {}
        for stream in streams:
            self._highlighters[stream] = self._get_highlighter(stream, colors)

    def _get_highlighter(self, stream, colors):
        # 'AUTO' enables highlighting only when writing to a real terminal.
        auto = Highlighter if utils.isatty(stream) else NoHighlighting
        choices = {'AUTO': auto,
                   'ON': Highlighter,
                   'FORCE': Highlighter,  # compatibility with 2.5.5 and earlier
                   'OFF': NoHighlighting,
                   'ANSI': AnsiHighlighter}
        return choices.get(colors.upper(), auto)(stream)

    def highlight_status(self, status, stream):
        """Write a PASS/FAIL/ERROR/WARN word in its status color."""
        highlighter = self._start_status_highlighting(status, stream)
        stream.write(status)
        highlighter.reset()

    def _start_status_highlighting(self, status, stream):
        highlighter = self._highlighters[stream]
        start_color = {'PASS': highlighter.green,
                       'FAIL': highlighter.red,
                       'ERROR': highlighter.red,
                       'WARN': highlighter.yellow}[status]
        start_color()
        return highlighter

    def highlight(self, text, color, stream):
        """Write arbitrary text using the named color ('green', 'red', ...)."""
        highlighter = self._highlighters[stream]
        getattr(highlighter, color)()
        stream.write(text)
        stream.flush()
        highlighter.reset()
class KeywordMarker(object):
    """Writes progress markers ('.' passed, 'F' failed) for keywords."""

    def __init__(self, markers, stdout, highlighter):
        self._stdout = stdout
        self._highlighter = highlighter
        self.marking_enabled = self._marking_enabled(markers, stdout)
        self.marker_count = 0

    def _marking_enabled(self, markers, stdout):
        # 'AUTO' writes markers only when stdout is a real terminal.
        explicit = {'ON': True, 'OFF': False}
        return explicit.get(markers.upper(), utils.isatty(stdout))

    def mark(self, kw):
        if self.marking_enabled:
            if kw.passed:
                marker, color = '.', 'green'
            else:
                marker, color = 'F', 'red'
            self._highlighter.highlight(marker, color, self._stdout)
            self.marker_count += 1

    def reset_count(self):
        self.marker_count = 0
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from .loggerhelper import AbstractLogger
class FileLogger(AbstractLogger):
    """Logs framework messages and execution events into a (syslog) file."""

    def __init__(self, path, level):
        """Open `path` for writing; only messages at/above `level` are kept.

        Raises DataError if the file cannot be opened.
        """
        AbstractLogger.__init__(self, level)
        self._writer = self._get_writer(path)  # unit test hook

    def _get_writer(self, path):
        try:
            return open(path, 'w')
        # 'except E as err' instead of legacy 'except E, err': valid on
        # Python 2.6+ and required on Python 3.
        except EnvironmentError as err:
            raise DataError(err.strerror)

    def message(self, msg):
        # Messages may still arrive after close(); ignore them then.
        if self._is_logged(msg.level) and not self._writer.closed:
            entry = '%s | %s | %s\n' % (msg.timestamp, msg.level.ljust(5),
                                        msg.message)
            self._writer.write(entry.encode('UTF-8'))

    def start_suite(self, suite):
        self.info("Started test suite '%s'" % suite.name)

    def end_suite(self, suite):
        self.info("Ended test suite '%s'" % suite.name)

    def start_test(self, test):
        self.info("Started test case '%s'" % test.name)

    def end_test(self, test):
        self.info("Ended test case '%s'" % test.name)

    def start_keyword(self, kw):
        # Lazy message: formatted only if DEBUG level is actually logged.
        self.debug(lambda: "Started keyword '%s'" % kw.name)

    def end_keyword(self, kw):
        self.debug(lambda: "Ended keyword '%s'" % kw.name)

    def output_file(self, name, path):
        self.info('%s: %s' % (name, path))

    def close(self):
        self._writer.close()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
from .logger import LOGGER
from .loggerhelper import IsLogged
def DebugFile(path):
    """Factory returning a _DebugFileWriter for `path`, or None.

    None is returned when no path is given or the file cannot be opened;
    failures are logged, not raised.
    """
    if not path:
        LOGGER.info('No debug file')
        return None
    try:
        outfile = open(path, 'w')
    # 'as err' instead of legacy comma syntax: valid 2.6+, required on Py3.
    except EnvironmentError as err:
        LOGGER.error("Opening debug file '%s' failed: %s"
                     % (path, err.strerror))
        return None
    else:
        LOGGER.info('Debug file: %s' % path)
        return _DebugFileWriter(outfile)
class _DebugFileWriter:
    """Writes a timestamped execution trace into the opened debug file."""

    _separators = {'SUITE': '=', 'TEST': '-', 'KW': '~'}
    _setup_or_teardown = ('setup', 'teardown')

    def __init__(self, outfile):
        self._indent = 0
        self._kw_level = 0
        # Used to avoid writing two separator lines back-to-back.
        self._separator_written_last = False
        self._outfile = outfile
        self._is_logged = IsLogged('DEBUG')

    def start_suite(self, suite):
        self._separator('SUITE')
        self._start('SUITE', suite.longname)
        self._separator('SUITE')

    def end_suite(self, suite):
        self._separator('SUITE')
        self._end('SUITE', suite.longname, suite.elapsedtime)
        self._separator('SUITE')
        # Indent back at zero means the top level suite ended.
        if self._indent == 0:
            LOGGER.output_file('Debug', self._outfile.name)
            self.close()

    def start_test(self, test):
        self._separator('TEST')
        self._start('TEST', test.name)
        self._separator('TEST')

    def end_test(self, test):
        self._separator('TEST')
        self._end('TEST', test.name, test.elapsedtime)
        self._separator('TEST')

    def start_keyword(self, kw):
        # Separator only above top level keywords, not nested ones.
        if self._kw_level == 0:
            self._separator('KW')
        self._start(self._get_kw_type(kw), kw.name, kw.args)
        self._kw_level += 1

    def end_keyword(self, kw):
        self._end(self._get_kw_type(kw), kw.name, kw.elapsedtime)
        self._kw_level -= 1

    def log_message(self, msg):
        if self._is_logged(msg.level):
            self._write(msg.message, level=msg.level, timestamp=msg.timestamp)

    def close(self):
        if not self._outfile.closed:
            self._outfile.close()

    def _get_kw_type(self, kw):
        if kw.type in self._setup_or_teardown:
            return kw.type.upper()
        return 'KW'

    def _start(self, type_, name, args=''):
        args = ' ' + utils.seq2str2(args)
        self._write('+%s START %s: %s%s' % ('-'*self._indent, type_, name, args))
        self._indent += 1

    def _end(self, type_, name, elapsed):
        self._indent -= 1
        self._write('+%s END %s: %s (%s)' % ('-'*self._indent, type_, name, elapsed))

    def _separator(self, type_):
        self._write(self._separators[type_] * 78, separator=True)

    def _write(self, text, separator=False, level='INFO', timestamp=None):
        # Collapse consecutive separators into one.
        if separator and self._separator_written_last:
            return
        if not separator:
            text = '%s - %s - %s' % (timestamp or utils.get_timestamp(),
                                     level, text)
        self._outfile.write(text.encode('UTF-8').rstrip() + '\n')
        self._outfile.flush()
        self._separator_written_last = separator
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os.path
from robot import utils
from robot.errors import DataError
from robot.model import Tags
from .loggerhelper import AbstractLoggerProxy
from .logger import LOGGER
if utils.is_jython:
from java.lang import Object
from java.util import HashMap
class _RecursionAvoidingMetaclass(type):
"""Metaclass to wrap listener methods so that they cannot cause recursion.
Recursion would otherwise happen if one listener logs something and that
message is received and logged again by log_message or message method.
"""
def __new__(cls, name, bases, dct):
for attr, value in dct.items():
if not attr.startswith('_') and inspect.isroutine(value):
dct[attr] = cls._wrap_listener_method(value)
dct['_calling_method'] = False
return type.__new__(cls, name, bases, dct)
@staticmethod
def _wrap_listener_method(method):
def wrapped(self, *args):
if not self._calling_method:
self._calling_method = True
method(self, *args)
self._calling_method = False
return wrapped
class Listeners(object):
    """Imports listeners and forwards execution events to them.

    Supports listener API versions 1 and 2; version 2 methods get an
    attribute dictionary instead of positional arguments. All public
    methods are wrapped by the metaclass so that a listener logging
    something cannot cause infinite recursion.
    """
    __metaclass__ = _RecursionAvoidingMetaclass
    _start_attrs = ['doc', 'starttime', 'longname']
    _end_attrs = _start_attrs + ['endtime', 'elapsedtime', 'status', 'message']

    def __init__(self, listeners):
        self._listeners = self._import_listeners(listeners)
        self._running_test = False
        self._setup_or_teardown_type = None

    def __nonzero__(self):
        # Py2 truth value: falsy when no listener could be imported.
        return bool(self._listeners)

    def _import_listeners(self, listener_data):
        """Import listeners; failures are logged as errors, not raised."""
        listeners = []
        for name, args in listener_data:
            try:
                listeners.append(_ListenerProxy(name, args))
            # 'as err' instead of legacy comma syntax (valid 2.6+, Py3).
            except DataError as err:
                if args:
                    name += ':' + ':'.join(args)
                LOGGER.error("Taking listener '%s' into use failed: %s"
                             % (name, unicode(err)))
        return listeners

    def start_suite(self, suite):
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_suite, suite.name, suite.doc)
            else:
                attrs = self._get_start_attrs(suite, 'metadata')
                attrs.update(self._get_suite_attrs(suite))
                li.call_method(li.start_suite, suite.name, attrs)

    def _get_suite_attrs(self, suite):
        return {
            'tests' : [t.name for t in suite.tests],
            'suites': [s.name for s in suite.suites],
            'totaltests': suite.test_count,
            'source': suite.source or ''
        }

    def end_suite(self, suite):
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_suite, suite.status,
                               suite.full_message)
            else:
                attrs = self._get_end_attrs(suite, 'metadata')
                attrs['statistics'] = suite.stat_message
                attrs.update(self._get_suite_attrs(suite))
                li.call_method(li.end_suite, suite.name, attrs)

    def start_test(self, test):
        self._running_test = True
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_test, test.name, test.doc,
                               list(test.tags))
            else:
                attrs = self._get_start_attrs(test, 'tags')
                attrs['critical'] = 'yes' if test.critical else 'no'
                attrs['template'] = test.template or ''
                li.call_method(li.start_test, test.name, attrs)

    def end_test(self, test):
        self._running_test = False
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_test, test.status, test.message)
            else:
                attrs = self._get_end_attrs(test, 'tags')
                attrs['critical'] = 'yes' if test.critical else 'no'
                attrs['template'] = test.template or ''
                li.call_method(li.end_test, test.name, attrs)

    def start_keyword(self, kw):
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_keyword, kw.name, kw.args)
            else:
                attrs = self._get_start_attrs(kw, 'args', '-longname')
                attrs['type'] = self._get_keyword_type(kw, start=True)
                li.call_method(li.start_keyword, kw.name, attrs)

    def end_keyword(self, kw):
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_keyword, kw.status)
            else:
                attrs = self._get_end_attrs(kw, 'args', '-longname', '-message')
                attrs['type'] = self._get_keyword_type(kw, start=False)
                li.call_method(li.end_keyword, kw.name, attrs)

    def _get_keyword_type(self, kw, start=True):
        # When running setup or teardown, only the top level keyword has type
        # set to setup/teardown but we want to pass that type also to all
        # start/end_keyword listener methods called below that keyword.
        if kw.type == 'kw':
            return self._setup_or_teardown_type or 'Keyword'
        kw_type = self._get_setup_or_teardown_type(kw)
        self._setup_or_teardown_type = kw_type if start else None
        return kw_type

    def _get_setup_or_teardown_type(self, kw):
        return '%s %s' % (('Test' if self._running_test else 'Suite'),
                          kw.type.title())

    def log_message(self, msg):
        # Version 1 listeners have no log message API.
        for li in self._listeners:
            if li.version == 2:
                li.call_method(li.log_message, self._create_msg_dict(msg))

    def message(self, msg):
        for li in self._listeners:
            if li.version == 2:
                li.call_method(li.message, self._create_msg_dict(msg))

    def _create_msg_dict(self, msg):
        return {'timestamp': msg.timestamp, 'message': msg.message,
                'level': msg.level, 'html': 'yes' if msg.html else 'no'}

    def output_file(self, name, path):
        # Dispatches to output_file/log_file/report_file/... by file name.
        for li in self._listeners:
            li.call_method(getattr(li, '%s_file' % name.lower()), path)

    def close(self):
        for li in self._listeners:
            li.call_method(li.close)

    def _get_start_attrs(self, item, *names):
        return self._get_attrs(item, self._start_attrs, names)

    def _get_end_attrs(self, item, *names):
        return self._get_attrs(item, self._end_attrs, names)

    def _get_attrs(self, item, defaults, extras):
        names = self._get_attr_names(defaults, extras)
        return dict((n, self._get_attr_value(item, n)) for n in names)

    def _get_attr_names(self, defaults, extras):
        # Extras prefixed with '-' remove an attribute from the defaults.
        names = list(defaults)
        for name in extras:
            if name.startswith('-'):
                names.remove(name[1:])
            else:
                names.append(name)
        return names

    def _get_attr_value(self, item, name):
        value = getattr(item, name)
        return self._take_copy_of_mutable_value(value)

    def _take_copy_of_mutable_value(self, value):
        # Listeners get copies so they cannot mutate the model objects.
        if isinstance(value, (dict, utils.NormalizedDict)):
            return dict(value)
        if isinstance(value, (list, tuple, Tags)):
            return list(value)
        return value
class _ListenerProxy(AbstractLoggerProxy):
    """Wraps one imported listener and calls its methods without raising."""

    _methods = ['start_suite', 'end_suite', 'start_test', 'end_test',
                'start_keyword', 'end_keyword', 'log_message', 'message',
                'output_file', 'report_file', 'log_file', 'debug_file',
                'xunit_file', 'close']

    def __init__(self, name, args):
        listener = self._import_listener(name, args)
        AbstractLoggerProxy.__init__(self, listener)
        self.name = name
        self.version = self._get_version(listener)
        # Java listeners need dict arguments converted to java.util.HashMap.
        self.is_java = utils.is_jython and isinstance(listener, Object)

    def _import_listener(self, name, args):
        importer = utils.Importer('listener')
        return importer.import_class_or_module(os.path.normpath(name),
                                               instantiate_with_args=args)

    def _get_version(self, listener):
        # Missing or non-integer ROBOT_LISTENER_API_VERSION means version 1.
        try:
            return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
        except ValueError:
            return 1

    def call_method(self, method, *args):
        """Call a listener method, logging (never propagating) failures."""
        if self.is_java:
            args = [self._to_map(a) if isinstance(a, dict) else a for a in args]
        try:
            method(*args)
        except:  # deliberately broad: a listener must not break execution
            message, details = utils.get_error_details()
            LOGGER.error("Calling listener method '%s' of listener '%s' failed: %s"
                         % (method.__name__, self.name, message))
            LOGGER.info("Details:\n%s" % details)

    def _to_map(self, dictionary):
        map = HashMap()
        for key, value in dictionary.iteritems():
            map.put(key, value)
        return map
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from robot import utils
from . import librarylogger
# Mapping from Robot Framework level names to standard logging levels.
LEVELS = {'TRACE': logging.NOTSET,
          'DEBUG': logging.DEBUG,
          'INFO': logging.INFO,
          'WARN': logging.WARNING}


def initialize(level):
    """Route messages from the standard logging module to Robot's log."""
    # Prevent logging from writing its own error reports to stderr.
    logging.raiseExceptions = False
    logging.getLogger().addHandler(RobotHandler())
    set_level(level)


def set_level(level):
    """Set the root logger's threshold. Unknown level names are ignored."""
    std_level = LEVELS.get(level.upper())
    if std_level is None:
        return
    logging.getLogger().setLevel(std_level)
class RobotHandler(logging.Handler):
    """Logging handler forwarding stdlib records to Robot's library log."""

    def emit(self, record):
        message, error = self._get_message(record)
        method = self._get_logger_method(record.levelno)
        method(message)
        if error:
            librarylogger.debug(error)

    def _get_message(self, record):
        """Return (message, error); error describes a formatting failure."""
        try:
            return record.getMessage(), None
        except:
            message = ('Failed to log following message properly: %s'
                       % utils.unic(record.msg))
            error = '\n'.join(utils.get_error_details())
            return message, error

    def _get_logger_method(self, level):
        # Map stdlib numeric levels onto Robot's level functions; anything
        # below DEBUG becomes TRACE.
        thresholds = ((logging.WARNING, librarylogger.warn),
                      (logging.INFO, librarylogger.info),
                      (logging.DEBUG, librarylogger.debug))
        for threshold, method in thresholds:
            if level >= threshold:
                return method
        return librarylogger.trace
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.errors import DataError
from .filelogger import FileLogger
from .loggerhelper import AbstractLogger, AbstractLoggerProxy
from .monitor import CommandLineMonitor
from .stdoutlogsplitter import StdoutLogSplitter
class Logger(AbstractLogger):
    """A global logger proxy to which new loggers may be registered.

    Whenever something is written to LOGGER in code, all registered loggers are
    notified.  Messages are also cached and cached messages written to new
    loggers when they are registered.

    Tools using Robot Framework's internal modules should register their own
    loggers at least to get notifications about errors and warnings. A shortcut
    to get errors/warnings into console is using 'register_console_logger'.
    """

    def __init__(self, register_console_logger=True):
        self._loggers = LoggerCollection()
        self._message_cache = []
        self._console_logger = None
        self._started_keywords = 0
        if register_console_logger:
            self.register_console_logger()

    def disable_message_cache(self):
        self._message_cache = None

    def register_logger(self, *loggers):
        for log in loggers:
            logger = self._loggers.register_regular_logger(log)
            self._relay_cached_messages_to(logger)

    def register_context_changing_logger(self, logger):
        # These loggers are notified first on start events and last on end
        # events (see LoggerCollection).
        log = self._loggers.register_context_changing_logger(logger)
        self._relay_cached_messages_to(log)

    def _relay_cached_messages_to(self, logger):
        if self._message_cache:
            for msg in self._message_cache[:]:
                logger.message(msg)

    def unregister_logger(self, *loggers):
        for log in loggers:
            self._loggers.unregister_logger(log)

    def register_console_logger(self, width=78, colors='AUTO', markers='AUTO',
                                stdout=None, stderr=None):
        # Only one console logger at a time; replace a previous one.
        logger = CommandLineMonitor(width, colors, markers, stdout, stderr)
        if self._console_logger:
            self._loggers.unregister_logger(self._console_logger)
        self._console_logger = logger
        self._loggers.register_regular_logger(logger)

    def unregister_console_logger(self):
        if not self._console_logger:
            return None
        logger = self._console_logger
        self._loggers.unregister_logger(logger)
        self._console_logger = None
        return logger

    # TODO: Remove in RF 2.9. Not used outside utests since 2.8.4 but may
    # be used by external tools. Need to check that before removal.
    disable_automatic_console_logger = unregister_console_logger

    def register_file_logger(self, path=None, level='INFO'):
        if not path:
            path = os.environ.get('ROBOT_SYSLOG_FILE', 'NONE')
            level = os.environ.get('ROBOT_SYSLOG_LEVEL', level)
        if path.upper() == 'NONE':
            return
        try:
            logger = FileLogger(path, level)
        # 'as err' instead of legacy comma syntax (valid 2.6+, Py3).
        except DataError as err:
            self.error("Opening syslog file '%s' failed: %s" % (path, unicode(err)))
        else:
            self.register_logger(logger)

    def message(self, msg):
        """Messages about what the framework is doing, warnings, errors, ..."""
        for logger in self._loggers.all_loggers():
            logger.message(msg)
        if self._message_cache is not None:
            self._message_cache.append(msg)

    def _log_message(self, msg):
        """Log messages written (mainly) by libraries"""
        for logger in self._loggers.all_loggers():
            logger.log_message(msg)
        if msg.level == 'WARN':
            self.message(msg)

    # Outside keywords log messages are handled like framework messages;
    # start_keyword/end_keyword below swap this binding to _log_message.
    log_message = message

    def log_output(self, output):
        for msg in StdoutLogSplitter(output):
            self.log_message(msg)

    def enable_library_import_logging(self):
        self._prev_log_message = self.log_message
        self.log_message = self.message

    def disable_library_import_logging(self):
        self.log_message = self._prev_log_message

    def output_file(self, name, path):
        """Finished output, report, log, debug, or xunit file"""
        for logger in self._loggers.all_loggers():
            logger.output_file(name, path)

    def close(self):
        for logger in self._loggers.all_loggers():
            logger.close()
        self._loggers = LoggerCollection()
        self._message_cache = []

    def start_suite(self, suite):
        for logger in self._loggers.starting_loggers():
            logger.start_suite(suite)

    def end_suite(self, suite):
        for logger in self._loggers.ending_loggers():
            logger.end_suite(suite)

    def start_test(self, test):
        for logger in self._loggers.starting_loggers():
            logger.start_test(test)

    def end_test(self, test):
        for logger in self._loggers.ending_loggers():
            logger.end_test(test)

    def start_keyword(self, keyword):
        self._started_keywords += 1
        # Inside keywords messages are treated as library log messages.
        self.log_message = self._log_message
        for logger in self._loggers.starting_loggers():
            logger.start_keyword(keyword)

    def end_keyword(self, keyword):
        self._started_keywords -= 1
        for logger in self._loggers.ending_loggers():
            logger.end_keyword(keyword)
        if not self._started_keywords:
            self.log_message = self.message

    def __iter__(self):
        return iter(self._loggers)
class LoggerCollection(object):
    """Holds logger proxies, separating context changing from regular ones.

    Context changing loggers are notified first when something starts and
    last when it ends, so other loggers see an up-to-date context.
    """

    def __init__(self):
        self._regular_loggers = []
        self._context_changing_loggers = []

    def register_regular_logger(self, logger):
        return self._register(self._regular_loggers, logger)

    def register_context_changing_logger(self, logger):
        return self._register(self._context_changing_loggers, logger)

    def _register(self, proxies, logger):
        # Loggers are stored wrapped into proxies; the proxy is returned.
        proxies.append(_LoggerProxy(logger))
        return proxies[-1]

    # TODO: Remove in RF 2.9. Doesn't seem to be used anywhere since 2.8.4.
    def remove_first_regular_logger(self):
        return self._regular_loggers.pop(0)

    def unregister_logger(self, logger):
        self._regular_loggers = self._without(self._regular_loggers, logger)
        self._context_changing_loggers = self._without(
            self._context_changing_loggers, logger)

    @staticmethod
    def _without(proxies, logger):
        return [proxy for proxy in proxies if proxy.logger is not logger]

    def starting_loggers(self):
        return self.all_loggers()

    def ending_loggers(self):
        return self._regular_loggers + self._context_changing_loggers

    def all_loggers(self):
        return self._context_changing_loggers + self._regular_loggers

    def __iter__(self):
        return iter(self.all_loggers())
class _LoggerProxy(AbstractLoggerProxy):
    """Proxy providing no-op fallbacks for methods a logger doesn't define."""

    _methods = ['message', 'log_message', 'output_file', 'close',
                'start_suite', 'end_suite', 'start_test', 'end_test',
                'start_keyword', 'end_keyword']


# The process-wide logger instance used throughout the framework.
LOGGER = Logger()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Windows highlighting code adapted from color_console.py. It is copyright
# Andre Burgaud, licensed under the MIT License, and available here:
# http://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
import os
import sys
try:
from ctypes import windll, Structure, c_short, c_ushort, byref
except ImportError: # Not on Windows or using Jython
windll = None
def Highlighter(stream):
    """Return the best highlighter implementation for this platform.

    POSIX consoles get ANSI escapes; on Windows the console API is used
    when ctypes is available, otherwise highlighting is disabled.
    """
    if os.sep == '/':
        return AnsiHighlighter(stream)
    if windll:
        return DosHighlighter(stream)
    return NoHighlighting(stream)
class AnsiHighlighter(object):
    """Colors console output by writing ANSI escape codes to a stream."""

    _ANSI_GREEN = '\033[32m'
    _ANSI_RED = '\033[31m'
    _ANSI_YELLOW = '\033[33m'
    _ANSI_RESET = '\033[0m'

    def __init__(self, stream):
        self._stream = stream

    def green(self):
        """Switch subsequent output to green."""
        self._set_color(self._ANSI_GREEN)

    def red(self):
        """Switch subsequent output to red."""
        self._set_color(self._ANSI_RED)

    def yellow(self):
        """Switch subsequent output to yellow."""
        self._set_color(self._ANSI_YELLOW)

    def reset(self):
        """Restore the stream's default color."""
        self._set_color(self._ANSI_RESET)

    def _set_color(self, color):
        # Subclasses override this to disable or redirect coloring.
        self._stream.write(color)
class NoHighlighting(AnsiHighlighter):
    """Highlighter that writes no color codes (used when colors are off)."""

    def _set_color(self, color):
        pass
class DosHighlighter(object):
    """Colors console output on Windows via kernel32 console attributes."""

    _FOREGROUND_GREEN = 0x2
    _FOREGROUND_RED = 0x4
    _FOREGROUND_YELLOW = 0x6
    _FOREGROUND_GREY = 0x7
    _FOREGROUND_INTENSITY = 0x8
    # High nibble of the console attribute word holds the background color.
    _BACKGROUND_MASK = 0xF0
    _STDOUT_HANDLE = -11
    _STDERR_HANDLE = -12

    def __init__(self, stream):
        self._handle = self._get_std_handle(stream)
        self._orig_colors = self._get_colors()
        # Keep the original background so only the foreground changes.
        self._background = self._orig_colors & self._BACKGROUND_MASK

    def green(self):
        self._set_foreground_colors(self._FOREGROUND_GREEN)

    def red(self):
        self._set_foreground_colors(self._FOREGROUND_RED)

    def yellow(self):
        self._set_foreground_colors(self._FOREGROUND_YELLOW)

    def reset(self):
        self._set_colors(self._orig_colors)

    def _get_std_handle(self, stream):
        handle = self._STDOUT_HANDLE \
            if stream is sys.__stdout__ else self._STDERR_HANDLE
        return windll.kernel32.GetStdHandle(handle)

    def _get_colors(self):
        csbi = _CONSOLE_SCREEN_BUFFER_INFO()
        ok = windll.kernel32.GetConsoleScreenBufferInfo(self._handle, byref(csbi))
        if not ok:  # Call failed, return default console colors (gray on black)
            return self._FOREGROUND_GREY
        return csbi.wAttributes

    def _set_foreground_colors(self, colors):
        self._set_colors(colors | self._FOREGROUND_INTENSITY | self._background)

    def _set_colors(self, colors):
        windll.kernel32.SetConsoleTextAttribute(self._handle, colors)
if windll:
    # ctypes mirrors of the Win32 console structures consumed by
    # GetConsoleScreenBufferInfo in DosHighlighter._get_colors.

    class _COORD(Structure):
        _fields_ = [("X", c_short),
                    ("Y", c_short)]

    class _SMALL_RECT(Structure):
        _fields_ = [("Left", c_short),
                    ("Top", c_short),
                    ("Right", c_short),
                    ("Bottom", c_short)]

    class _CONSOLE_SCREEN_BUFFER_INFO(Structure):
        _fields_ = [("dwSize", _COORD),
                    ("dwCursorPosition", _COORD),
                    ("wAttributes", c_ushort),
                    ("srWindow", _SMALL_RECT),
                    ("dwMaximumWindowSize", _COORD)]
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the public test library logging API.
This is exposed via :py:mod:`robot.api.logger`. Implementation must reside
here to avoid cyclic imports.
"""
import sys
import threading
from robot.utils import unic, encode_output
from .logger import LOGGER
from .loggerhelper import Message
# Names of threads that are allowed to log; `write` silently ignores
# messages coming from any other thread.
LOGGING_THREADS = ('MainThread', 'RobotFrameworkTimeoutThread')
def write(msg, level, html=False):
    """Writes the message to the log file using the given level.

    Callable messages are converted to strings up front: lazy logging via
    callables is an internal mechanism that is not exposed publicly. See
    http://code.google.com/p/robotframework/issues/detail?id=1505
    """
    if callable(msg):
        msg = unic(msg)
    current = threading.currentThread().getName()
    if current not in LOGGING_THREADS:
        return
    LOGGER.log_message(Message(msg, level, html))
def trace(msg, html=False):
    """Writes the message to the log file using the ``TRACE`` level."""
    write(msg, 'TRACE', html=html)
def debug(msg, html=False):
    """Writes the message to the log file using the ``DEBUG`` level."""
    write(msg, 'DEBUG', html=html)
def info(msg, html=False, also_console=False):
    """Writes the message to the log file using the ``INFO`` level.

    With ``also_console=True`` the message is additionally echoed to the
    console via :func:`console`.
    """
    write(msg, 'INFO', html=html)
    if also_console:
        console(msg)
def warn(msg, html=False):
    """Writes the message to the log file using the ``WARN`` level."""
    write(msg, 'WARN', html=html)
def console(msg, newline=True, stream='stdout'):
    """Writes the message to the console.

    By default the message goes to stdout with a trailing newline added;
    ``stream='stderr'`` (case-insensitively) redirects it to stderr.
    """
    text = unic(msg)
    if newline:
        text += '\n'
    target = sys.__stderr__ if stream.lower() == 'stderr' else sys.__stdout__
    target.write(encode_output(text))
    target.flush()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.utils import format_time
from .loggerhelper import Message, LEVELS
class StdoutLogSplitter(object):
    """Splits messages logged through stdout (or stderr) into Message objects"""

    # Matches explicit level markers such as `*INFO*` or
    # `*DEBUG:1383735208.123*` at the beginning of a line. Raw strings are
    # used so that regex escapes like `\*` and `\d` are not interpreted as
    # (invalid) Python string escapes.
    _split_from_levels = re.compile(r'^(?:\*'
                                    r'(%s|HTML)'          # Level
                                    r'(:\d+(?:\.\d+)?)?'  # Optional timestamp
                                    r'\*)' % '|'.join(LEVELS), re.MULTILINE)

    def __init__(self, output):
        self._messages = list(self._get_messages(output.strip()))

    def _get_messages(self, output):
        for level, timestamp, msg in self._split_output(output):
            if timestamp:
                # Drop the leading `:` and convert epoch millis to a
                # formatted timestamp.
                timestamp = self._format_timestamp(timestamp[1:])
            yield Message(msg.strip(), level, timestamp=timestamp)

    def _split_output(self, output):
        # re.split with two capturing groups produces tokens in repeating
        # (level, timestamp, message) triples.
        tokens = self._split_from_levels.split(output)
        tokens = self._add_initial_level_and_time_if_needed(tokens)
        for i in xrange(0, len(tokens), 3):
            yield tokens[i:i+3]

    def _add_initial_level_and_time_if_needed(self, tokens):
        # If the output did not start with a level marker, default the
        # first message to INFO with no timestamp.
        if self._output_started_with_level(tokens):
            return tokens[1:]
        return ['INFO', None] + tokens

    def _output_started_with_level(self, tokens):
        # re.split yields an empty first token when the input begins with
        # a match.
        return tokens[0] == ''

    def _format_timestamp(self, millis):
        return format_time(float(millis)/1000, millissep='.')

    def __iter__(self):
        return iter(self._messages)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for internal logging and other output.
Not part of the public API, and also subject to change in the future when
test execution is refactored.
"""
from .output import Output
from .logger import LOGGER
from .xmllogger import XmlLogger
from .loggerhelper import LEVELS, Message
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from org.robotframework import RobotRunner
from robot import run_cli, rebot_cli
from robot.libdoc import libdoc_cli
from robot.tidy import tidy_cli
from robot.testdoc import testdoc_cli
USAGE = """robotframework.jar - Robot Framework runner.
Usage: java -jar robotframework.jar [command] [options] [input(s)]
Available commands:
run - Run Robot Framework tests. The default, if no command is given.
rebot - Post process Robot Framework output files.
libdoc - Create test library or resource file documentation.
tidy - Clean-up and change format of test data files.
testdoc - Create documentation from Robot Framework test data files.
Run `java -jar robotframework.jar command --help` for more information about
an individual command.
Examples:
java -jar robotframework.jar mytests.txt
java -jar robotframework.jar run mytests.txt
java -jar robotframework.jar rebot --log mylog.html out.xml
java -jar robotframework.jar tidy --format txt mytests.html
"""
class JarRunner(RobotRunner):
"""Used for Java-Jython interop when RF is executed from .jar file"""
_commands = {'run': run_cli, 'rebot': rebot_cli, 'tidy': tidy_cli,
'libdoc': libdoc_cli, 'testdoc': testdoc_cli}
def run(self, args):
try:
self._run(args)
except SystemExit, err:
return err.code
def _run(self, args):
if not args or args[0] in ('-h', '--help'):
print USAGE
raise SystemExit(0)
command, args = self._parse_command_line(args)
command(args) # Always calls sys.exit()
def _parse_command_line(self, args):
try:
return self._commands[args[0]], args[1:]
except KeyError:
return run_cli, args
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The root of the Robot Framework package.
The command line entry points provided by the framework are exposed for
programmatic usage as follows:
* :func:`~robot.run.run`: Function to run tests.
* :func:`~robot.run.run_cli`: Function to run tests
with command line argument processing.
* :func:`~robot.rebot.rebot`: Function to post-process outputs.
* :func:`~robot.rebot.rebot_cli`: Function to post-process outputs
with command line argument processing.
* :mod:`~robot.libdoc`: Module for library documentation generation.
* :mod:`~robot.testdoc`: Module for test case documentation generation.
* :mod:`~robot.tidy`: Module for test data clean-up and format change.
All the functions above can be imported like ``from robot import run``.
Functions and classes provided by the modules need to be imported like
``from robot.libdoc import libdoc_cli``.
The functions and modules listed above are considered stable. Other modules in
this package are for internal usage and may change without prior notice.
.. tip:: More public APIs are exposed by the :mod:`robot.api` package.
"""
import sys
# Ensure `pythonpathsetter` has adjusted sys.path before other robot
# modules are imported. `as _` keeps the helper out of the namespace.
if 'pythonpathsetter' not in sys.modules:
    from robot import pythonpathsetter as _
# Jython needs workarounds applied before further imports.
if sys.platform.startswith('java'):
    from robot import jythonworkarounds as _
from robot.rebot import rebot, rebot_cli
from robot.run import run, run_cli
from robot.version import get_version
# Stable public API of the package; other names are internal.
__all__ = ['run', 'run_cli', 'rebot', 'rebot_cli']
__version__ = get_version()
| Python |
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for post-processing outputs.
This module can be executed from the command line using the following
approaches::
python -m robot.rebot
python path/to/robot/rebot.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module is also used by the installed ``rebot``, ``jyrebot`` and
``ipyrebot`` start-up scripts.
This module also provides :func:`rebot` and :func:`rebot_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
USAGE = """Rebot -- Robot Framework report and log generator
Version: <VERSION>
Usage: rebot|jyrebot|ipyrebot [options] robot_outputs
or: python|jython|ipy -m robot.rebot [options] robot_outputs
or: python|jython|ipy path/to/robot/rebot.py [options] robot_outputs
or: java -jar robotframework.jar rebot [options] robot_outputs
Rebot can be used to generate logs and reports in HTML format. It can also
produce new XML output files which can be further processed with Rebot or
other tools.
Inputs to Rebot are XML output files generated by Robot Framework test runs or
earlier Rebot executions. When more than one input file is given, a new top
level test suite containing suites in the given files is created by default.
This allows combining multiple outputs together to create higher level reports.
An exception is that if --rerunmerge is used, results are combined by replacing
original tests with tests in the latter test runs.
Depending on whether Robot Framework is installed using Python, Jython, or
IronPython, Rebot can be run using `rebot`, `jyrebot` or `ipyrebot` command,
respectively. Alternatively, it is possible to directly execute `robot.rebot`
module (e.g. `python -m robot.rebot`) or `robot/rebot.py` script using a
selected interpreter. Finally, there is also a standalone JAR distribution.
For more information about Robot Framework run, for example, `pybot --help` or
go to http://robotframework.org.
Options
=======
-N --name name Set the name of the top level test suite. Underscores
in the name are converted to spaces. Default name is
created from the name of the executed data source.
-D --doc documentation Set the documentation of the top level test suite.
Underscores in the documentation are converted to
spaces and it may also contain simple HTML formatting
(e.g. *bold* and http://url/).
-M --metadata name:value * Set metadata of the top level test suite.
Underscores in the name and value are converted to
spaces. Value can contain same HTML formatting as
--doc. Example: `--metadata version:1.2`
-G --settag tag * Sets given tag(s) to all executed test cases.
-t --test name * Select test cases by name or long name. Name is case
and space insensitive and it can also be a simple
pattern where `*` matches anything and `?` matches
any char. If using `*` and `?` in the console is
problematic, see --escape and --argumentfile.
-s --suite name * Select test suites by name. When this option is used
with --test, --include or --exclude, only test cases
in matching suites and also matching other filtering
criteria are selected. Given name can be a simple
pattern similarly as with --test.
-i --include tag * Select test cases to be included by tag. Similarly as name with
--test, tag is case and space insensitive and it is
possible to use patterns with `*` and `?` as
wildcards. Tags and patterns can also be combined
together with `AND`, `OR`, and `NOT` operators.
Examples: --include foo --include bar*
--include fooANDbar*
-e --exclude tag * Select test cases not to be included by tag. These
tests are not selected even if included with
--include. Tags are matched using the rules explained
with --include.
-R --rerunmerge Combine results of re-running tests so that tests in
the latter runs replace the original. Typically used
after using --rerunfailed option when running tests.
Example: rebot --rerunmerge orig.xml rerun.xml
--processemptysuite Processes output also if the top level test suite is
empty. Useful e.g. with --include/--exclude when it
is not an error that no test matches the condition.
-c --critical tag * Tests having given tag are considered critical. If no
critical tags are set, all tags are critical. Tags
can be given as a pattern like with --include.
-n --noncritical tag * Tests with given tag are not critical even if they
have a tag set with --critical. Tag can be a pattern.
-d --outputdir dir Where to create output files. The default is the
directory where Rebot is run from and the given path
is considered relative to that unless it is absolute.
-o --output file XML output file. Not created unless this option is
specified. Given path, similarly as paths given to
--log, --report and --xunit, is relative to
--outputdir unless given as an absolute path.
-l --log file HTML log file. Can be disabled by giving a special
name `NONE`. Default: log.html
Examples: `--log mylog.html`, `-l none`
-r --report file HTML report file. Can be disabled with `NONE`
similarly as --log. Default: report.html
-x --xunit file xUnit compatible result file. Not created unless this
option is specified.
--xunitfile file Deprecated. Use --xunit instead.
--xunitskipnoncritical Mark non-critical tests on xUnit output as skipped.
-T --timestampoutputs When this option is used, timestamp in a format
`YYYYMMDD-hhmmss` is added to all generated output
files between their basename and extension. For
example `-T -o output.xml -r report.html -l none`
creates files like `output-20070503-154410.xml` and
`report-20070503-154410.html`.
--splitlog Split log file into smaller pieces that open in
browser transparently.
--logtitle title Title for the generated test log. The default title
is `<Name Of The Suite> Test Log`. Underscores in
the title are converted into spaces in all titles.
--reporttitle title Title for the generated test report. The default
title is `<Name Of The Suite> Test Report`.
--reportbackground colors Background colors to use in the report file.
Either `all_passed:critical_passed:failed` or
`passed:failed`. Both color names and codes work.
Examples: --reportbackground green:yellow:red
--reportbackground #00E:#E00
-L --loglevel level Threshold for selecting messages. Available levels:
TRACE (default), DEBUG, INFO, WARN, NONE (no msgs).
Use syntax `LOGLEVEL:DEFAULT` to define the default
visible log level in log files.
Examples: --loglevel DEBUG
--loglevel DEBUG:INFO
--suitestatlevel level How many levels to show in `Statistics by Suite`
in log and report. By default all suite levels are
shown. Example: --suitestatlevel 3
--tagstatinclude tag * Include only matching tags in `Statistics by Tag`
and `Test Details` in log and report. By default all
tags set in test cases are shown. Given `tag` can
also be a simple pattern (see e.g. --test).
--tagstatexclude tag * Exclude matching tags from `Statistics by Tag` and
`Test Details`. This option can be used with
--tagstatinclude similarly as --exclude is used with
--include.
--tagstatcombine tags:name * Create combined statistics based on tags.
These statistics are added into `Statistics by Tag`
and matching tests into `Test Details`. If optional
`name` is not given, name of the combined tag is got
from the specified tags. Tags are combined using the
rules explained in --include.
Examples: --tagstatcombine requirement-*
--tagstatcombine tag1ANDtag2:My_name
--tagdoc pattern:doc * Add documentation to tags matching given pattern.
Documentation is shown in `Test Details` and also as
a tooltip in `Statistics by Tag`. Pattern can contain
characters `*` (matches anything) and `?` (matches
any char). Documentation can contain formatting
similarly as with --doc option.
Examples: --tagdoc mytag:My_documentation
--tagdoc regression:*See*_http://info.html
--tagdoc owner-*:Original_author
--tagstatlink pattern:link:title * Add external links into `Statistics by
Tag`. Pattern can contain characters `*` (matches
anything) and `?` (matches any char). Characters
matching to wildcard expressions can be used in link
and title with syntax %N, where N is index of the
match (starting from 1). In title underscores are
automatically converted to spaces.
Examples: --tagstatlink mytag:http://my.domain:Link
--tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker
--removekeywords all|passed|name:<pattern>|for|wuks|none * Remove keyword
data from all generated outputs. Keywords containing
warnings are not removed except in `all` mode.
all: remove data from all keywords
passed: remove data only from keywords in passed
test cases and suites
name:<pattern>: remove data from keywords that match
the given pattern. The pattern is matched
against the full name of the keyword (e.g.
'MyLib.Keyword', 'resource.Second Keyword'),
is case, space, and underscore insensitive,
and may contain `*` and `?` as wildcards.
Examples: --removekeywords name:Lib.HugeKw
--removekeywords name:myresource.*
for: remove passed iterations from for loops
wuks: remove all but the last failing keyword
inside `BuiltIn.Wait Until Keyword Succeeds`
--flattenkeywords name:<pattern> * Flattens matching keywords. Matching
keywords get all messages from their child keywords
and children are discarded otherwise. Matching rules
are same as with `--removekeywords name:<pattern>`.
--starttime timestamp Set starting time of test execution when creating
reports. Timestamp must be given in format
`2007-10-01 15:12:42.268` where all separators are
optional (e.g. `20071001151242268` is ok too) and
parts from milliseconds to hours can be omitted if
they are zero (e.g. `2007-10-01`). This can be used
to override starttime of the suite when reports are
created from a single suite or to set starttime for
combined suite, which is otherwise set to `N/A`.
--endtime timestamp Same as --starttime but for ending time. If both
options are used, elapsed time of the suite is
calculated based on them. For combined suites,
it is otherwise calculated by adding elapsed times
of combined test suites together.
--nostatusrc Sets the return code to zero regardless of failures
in test cases. Error codes are returned normally.
-C --monitorcolors auto|on|ansi|off Use colors on console output or not.
auto: use colors when output not redirected (default)
on: always use colors
ansi: like `on` but use ANSI colors also on Windows
off: disable colors altogether
Note that colors do not work with Jython on Windows.
-E --escape what:with * Escape characters which are problematic in console.
`what` is the name of the character to escape and
`with` is the string to escape it with. Note that
all given arguments, incl. data sources, are escaped
so escape characters ought to be selected carefully.
<---------------------ESCAPES----------------------->
Examples:
--escape space:_ --metadata X:Value_with_spaces
-E space:SP -E quot:Q -v var:QhelloSPworldQ
-A --argumentfile path * Text file to read more arguments from. File can have
both options and data sources one per line. Contents
do not need to be escaped but spaces in the beginning
and end of lines are removed. Empty lines and lines
starting with a hash character (#) are ignored.
Example file:
| --include regression
| --name Regression Tests
| # This is a comment line
| my_tests.html
| path/to/test/directory/
-h -? --help Print usage instructions.
--version Print version information.
Options that are marked with an asterisk (*) can be specified multiple times.
For example, `--test first --test third` selects test cases with name `first`
and `third`. If other options are given multiple times, the last value is used.
Long option format is case-insensitive. For example, --SuiteStatLevel is
equivalent to but easier to read than --suitestatlevel. Long options can
also be shortened as long as they are unique. For example, `--logti Title`
works while `--lo log.html` does not because the former matches only --logtitle
but the latter matches both --log and --logtitle.
Environment Variables
=====================
REBOT_OPTIONS Space separated list of default options to be placed
in front of any explicit options on the command line.
ROBOT_SYSLOG_FILE Path to a file where Robot Framework writes internal
information about processed files. Can be useful when
debugging problems. If not set, or set to special
value `NONE`, writing to the syslog file is disabled.
ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file.
Available levels are the same as for --loglevel
command line option and the default is INFO.
Examples
========
# Simple Rebot run that creates log and report with default names.
$ rebot output.xml
# Using options. Note that this is one long command split into multiple lines.
$ rebot --log smoke_log.html --report smoke_report.html --include smoke
--ReportTitle Smoke_Tests --ReportBackground green:yellow:red
--TagStatCombine tag1ANDtag2 path/to/myoutput.xml
# Executing `robot.rebot` module using Python and creating combined outputs.
$ python -m robot.rebot outputs/*.xml
# Running `robot/rebot.py` script with Jython.
$ jython path/robot/rebot.py -N Project_X -l none -r x.html output.xml
"""
import sys
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RebotSettings
from robot.errors import DataError
from robot.reporting import ResultWriter
from robot.output import LOGGER
from robot.utils import Application
from robot.run import RobotFramework
class Rebot(RobotFramework):
    """Application implementing the `rebot` post-processing entry point."""

    def __init__(self):
        # At least one output file is required; extra default options can
        # be given via the REBOT_OPTIONS environment variable.
        Application.__init__(self, USAGE, arg_limits=(1,),
                             env_options='REBOT_OPTIONS', logger=LOGGER)

    def main(self, datasources, **options):
        """Writes reports/logs from given outputs and returns the rc.

        Raises DataError if no outputs could be created.
        """
        settings = RebotSettings(options)
        LOGGER.register_console_logger(**settings.console_logger_config)
        LOGGER.disable_message_cache()
        return_code = ResultWriter(*datasources).write_results(settings)
        if return_code < 0:
            raise DataError('No outputs created.')
        return return_code
def rebot_cli(arguments):
    """Command line execution entry point for running rebot.

    :param arguments: Command line arguments as a list of strings.

    Note that this function calls :func:`sys.exit`; for programmatic usage
    the :func:`rebot` function, which returns normally, is typically a
    better fit.

    Example::

        from robot import rebot_cli

        rebot_cli(['--report', 'r.html', '--log', 'NONE', 'o1.xml', 'o2.xml'])
    """
    Rebot().execute_cli(arguments)
def rebot(*datasources, **options):
    """Creates reports/logs from given Robot output files with given options.

    ``datasources`` are paths to Robot output files, exactly as when running
    rebot on the command line. ``options`` are keyword arguments named after
    the long command line options without hyphens.

    Options that the command line accepts multiple times are passed as
    lists, e.g. ``include=['tag1', 'tag2']``; a single value may also be
    given as a plain string, e.g. ``include='tag'``.

    The special keyword arguments ``stdout`` and ``stderr`` accept open
    file objects used to capture the respective streams.

    Returns the same return code as a command line execution would.

    Examples::

        from robot import rebot

        rebot('path/to/output.xml')
        with open('stdout.txt', 'w') as stdout:
            rebot('o1.xml', 'o2.xml', report='r.html', log='NONE', stdout=stdout)

    Equivalent command line usage::

        rebot path/to/output.xml
        rebot --report r.html --log NONE o1.xml o2.xml > stdout.txt
    """
    return Rebot().execute(*datasources, **options)
# Allows executing this module directly as a script.
if __name__ == '__main__':
    rebot_cli(sys.argv[1:])
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Explicit relative import: consistent with sibling modules and required
# under absolute-import semantics (PEP 328); the implicit form
# `from visitor import ...` is fragile and Python 2 only.
from .visitor import SuiteVisitor


class TagSetter(SuiteVisitor):
    """Visitor that adds and/or removes tags from all visited tests."""

    def __init__(self, add=None, remove=None):
        """``add`` and ``remove`` are tags/patterns, single or as lists."""
        self.add = add
        self.remove = remove

    def start_suite(self, suite):
        # Returning a falsy value skips the whole suite when there is
        # nothing to add or remove.
        return bool(self)

    def visit_test(self, test):
        test.tags.add(self.add)
        test.tags.remove(self.remove)

    def visit_keyword(self, keyword):
        # Keywords have no tags to modify; do not descend into them.
        pass

    def __nonzero__(self):
        return bool(self.add or self.remove)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
from robot.errors import DataError
from .visitor import SuiteVisitor
class SuiteConfigurer(SuiteVisitor):
    """Configures a suite structure: renames, tags and filters it."""

    def __init__(self, name=None, doc=None, metadata=None, set_tags=None,
                 include_tags=None, exclude_tags=None, include_suites=None,
                 include_tests=None, empty_suite_ok=False):
        self.name = name
        self.doc = doc
        self.metadata = metadata
        self.set_tags = set_tags or []
        self.include_tags = include_tags
        self.exclude_tags = exclude_tags
        self.include_suites = include_suites
        self.include_tests = include_tests
        self.empty_suite_ok = empty_suite_ok

    @property
    def add_tags(self):
        """Tags to add: `set_tags` entries without a `-` prefix."""
        return [tag for tag in self.set_tags if not tag.startswith('-')]

    @property
    def remove_tags(self):
        """Tags to remove: `set_tags` entries with their `-` prefix dropped."""
        return [tag[1:] for tag in self.set_tags if tag.startswith('-')]

    def visit_suite(self, suite):
        self._set_suite_attributes(suite)
        self._filter(suite)
        suite.set_tags(self.add_tags, self.remove_tags)

    def _set_suite_attributes(self, suite):
        if self.name:
            suite.name = self.name
        if self.doc:
            suite.doc = self.doc
        if self.metadata:
            suite.metadata.update(self.metadata)

    def _filter(self, suite):
        # Capture the name up front for the error message below.
        name = suite.name
        suite.filter(self.include_suites, self.include_tests,
                     self.include_tags, self.exclude_tags)
        if not (suite.test_count or self.empty_suite_ok):
            self._raise_no_tests_error(name)

    def _raise_no_tests_error(self, name):
        selectors = ('%s %s' % (self._get_test_selector_msgs(),
                                self._get_suite_selector_msg())).strip()
        message = "Suite '%s' contains no tests %s" % (name, selectors)
        raise DataError(message.strip() + '.')

    def _get_test_selector_msgs(self):
        parts = [self._format_selector_msg(explanation, selector)
                 for explanation, selector in (('with tags', self.include_tags),
                                               ('without tags', self.exclude_tags),
                                               ('named', self.include_tests))
                 if selector]
        return utils.seq2str(parts, quote='')

    def _format_selector_msg(self, explanation, selector):
        # Drop the plural `s` when only one value was selected.
        if len(selector) == 1 and explanation.endswith('s'):
            explanation = explanation[:-1]
        return '%s %s' % (explanation, utils.seq2str(selector, lastsep=' or '))

    def _get_suite_selector_msg(self):
        if not self.include_suites:
            return ''
        return self._format_selector_msg('in suites', self.include_suites)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import html_escape
from .itemlist import ItemList
from .modelobject import ModelObject
class Message(ModelObject):
    """A message outputted during the test execution.

    Messages are log messages triggered by keywords as well as warnings
    and errors occurred during the test execution.
    """
    __slots__ = ['message', 'level', 'html', 'timestamp', 'parent']

    def __init__(self, message='', level='INFO', html=False, timestamp=None,
                 parent=None):
        #: The message content as a string.
        self.message = message
        #: Severity of the message. Either ``TRACE``, ``INFO``,
        #: ``WARN``, ``DEBUG`` or ``FAIL``/``ERROR``.
        self.level = level
        #: ``True`` if the content is in HTML, ``False`` otherwise.
        self.html = html
        #: Timestamp in format ``%Y%m%d %H:%M:%S.%f``.
        self.timestamp = timestamp
        #: The object this message was triggered by.
        self.parent = parent

    @property
    def html_message(self):
        """The message content as HTML; plain text content is escaped."""
        if self.html:
            return self.message
        return html_escape(self.message)

    def visit(self, visitor):
        """Calls ``visit_message`` on the given visitor."""
        visitor.visit_message(self)

    def __unicode__(self):
        return self.message
class Messages(ItemList):
    """List-like collection of :class:`Message` instances."""
    __slots__ = []
    def __init__(self, message_class=Message, parent=None, messages=None):
        # `parent` is applied as a common attribute to contained messages.
        ItemList.__init__(self, message_class, {'parent': parent}, messages)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tags import TagPatterns
class Criticality(object):
    """Resolves test criticality from critical/non-critical tag patterns."""

    def __init__(self, critical_tags=None, non_critical_tags=None):
        self.critical_tags = self._get_tag_patterns(critical_tags)
        self.non_critical_tags = self._get_tag_patterns(non_critical_tags)

    def _get_tag_patterns(self, tags):
        if isinstance(tags, TagPatterns):
            return tags
        return TagPatterns(tags)

    def tag_is_critical(self, tag):
        return self.critical_tags.match(tag)

    def tag_is_non_critical(self, tag):
        return self.non_critical_tags.match(tag)

    def test_is_critical(self, test):
        # When critical tags are configured, the test must match them;
        # matching a non-critical tag always makes the test non-critical.
        if self.critical_tags and not self.critical_tags.match(test.tags):
            return False
        return not self.non_critical_tags.match(test.tags)

    def __nonzero__(self):
        return bool(self.critical_tags or self.non_critical_tags)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import Matcher, NormalizedDict, setter
class Tags(object):
    """Normalized, tuple-backed collection of test tags."""

    def __init__(self, tags=None):
        self._tags = tags

    @setter
    def _tags(self, tags):
        # Normalize anything assigned: None/empty -> (), string -> 1-tuple.
        if not tags:
            return ()
        if isinstance(tags, basestring):
            tags = (tags,)
        return self._normalize(tags)

    def _normalize(self, tags):
        # NormalizedDict drops duplicates using normalized comparison
        # (underscores ignored); empty and 'NONE' tags are removed.
        unique = NormalizedDict([(t, 1) for t in tags], ignore='_')
        for ignored in ('', 'NONE'):
            if ignored in unique:
                unique.pop(ignored)
        return tuple(unique)

    def add(self, tags):
        self._tags = tuple(self) + tuple(Tags(tags))

    def remove(self, tags):
        patterns = TagPatterns(tags)
        self._tags = [t for t in self if not patterns.match(t)]

    def match(self, tags):
        return TagPatterns(tags).match(self)

    def __contains__(self, tags):
        return self.match(tags)

    def __len__(self):
        return len(self._tags)

    def __iter__(self):
        return iter(self._tags)

    def __unicode__(self):
        return u'[%s]' % ', '.join(self)

    def __repr__(self):
        return repr(list(self))

    def __str__(self):
        return unicode(self).encode('UTF-8')

    def __getitem__(self, index):
        if isinstance(index, slice):
            return Tags(self._tags[index])
        return self._tags[index]

    def __add__(self, other):
        return Tags(tuple(self) + tuple(Tags(other)))
class TagPatterns(object):
    """Collection of tag patterns created by the :func:`TagPattern` factory."""

    def __init__(self, patterns):
        self._patterns = tuple(TagPattern(p) for p in Tags(patterns))

    def match(self, tags):
        """Tell whether any pattern matches the given tag or tags."""
        if not isinstance(tags, Tags):
            tags = Tags(tags)
        return any(pattern.match(tags) for pattern in self._patterns)

    def __contains__(self, tag):
        return self.match(tag)

    def __len__(self):
        return len(self._patterns)

    def __iter__(self):
        return iter(self._patterns)

    def __getitem__(self, index):
        return self._patterns[index]
def TagPattern(pattern):
    """Factory parsing ``pattern`` into a matcher object.

    ``NOT``, ``OR`` and ``AND``/``&`` operators are recognized, in that
    order of precedence; a plain pattern yields a single matcher.
    """
    if 'NOT' in pattern:
        parts = pattern.split('NOT')
        return _NotTagPattern(*parts)
    if 'OR' in pattern:
        return _OrTagPattern(pattern.split('OR'))
    if 'AND' in pattern or '&' in pattern:
        normalized = pattern.replace('&', 'AND')
        return _AndTagPattern(normalized.split('AND'))
    return _SingleTagPattern(pattern)
class _SingleTagPattern(object):
    """Matches one glob-like pattern (underscores ignored) against tags."""

    def __init__(self, pattern):
        self._matcher = Matcher(pattern, ignore='_')

    def match(self, tags):
        matcher = self._matcher
        return matcher.match_any(tags)

    def __unicode__(self):
        return self._matcher.pattern
class _AndTagPattern(object):
    """Matches only when every sub-pattern matches."""

    def __init__(self, patterns):
        self._patterns = tuple(TagPattern(p) for p in patterns)

    def match(self, tags):
        for pattern in self._patterns:
            if not pattern.match(tags):
                return False
        return True
class _OrTagPattern(object):
    """Matches when at least one sub-pattern matches."""

    def __init__(self, patterns):
        self._patterns = tuple(TagPattern(p) for p in patterns)

    def match(self, tags):
        for pattern in self._patterns:
            if pattern.match(tags):
                return True
        return False
class _NotTagPattern(object):
    """Matches when the first pattern matches and none of the rest do."""

    def __init__(self, must_match, *must_not_match):
        self._first = TagPattern(must_match)
        self._rest = _OrTagPattern(must_not_match)

    def match(self, tags):
        if not self._first.match(tags):
            return False
        return not self._rest.match(tags)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.utils import NormalizedDict
from .criticality import Criticality
from .stats import TagStat, CombinedTagStat
from .tags import TagPatterns
class TagStatistics(object):
    """Container for tag statistics.
    """
    def __init__(self, combined_stats):
        #: Dictionary, where key is the name of the tag as a string and value
        #: is an instance of :class:`~robot.model.stats.TagStat`.
        self.tags = NormalizedDict(ignore=['_'])
        #: List of combined statistics (filled by
        #: ``TagStatInfo.get_combined_stats``), one entry per combined tag.
        self.combined = combined_stats
    def visit(self, visitor):
        """Visitor interface entry point: dispatches to ``visit_tag_statistics``."""
        visitor.visit_tag_statistics(self)
    def __iter__(self):
        # Per-tag and combined stats together, in sorted order (Python 2:
        # dict.values() returns a list, so `+` concatenates).
        return iter(sorted(self.tags.values() + self.combined))
class TagStatisticsBuilder(object):
    """Builds :class:`TagStatistics` incrementally from added tests."""

    def __init__(self, criticality=None, included=None, excluded=None,
                 combined=None, docs=None, links=None):
        self._included = TagPatterns(included)
        self._excluded = TagPatterns(excluded)
        self._info = TagStatInfo(criticality, docs, links)
        self.stats = TagStatistics(self._info.get_combined_stats(combined))

    def add_test(self, test):
        self._add_tags_to_statistics(test)
        self._add_to_combined_statistics(test)

    def _add_tags_to_statistics(self, test):
        for tag in test.tags:
            if not self._is_included(tag):
                continue
            if tag not in self.stats.tags:
                self.stats.tags[tag] = self._info.get_stat(tag)
            self.stats.tags[tag].add_test(test)

    def _is_included(self, tag):
        # Included patterns, when given, must match; excluded must not.
        included = self._included
        if included and not included.match(tag):
            return False
        return not self._excluded.match(tag)

    def _add_to_combined_statistics(self, test):
        for stat in self.stats.combined:
            if stat.match(test.tags):
                stat.add_test(test)
class TagStatInfo(object):
    """Creates tag statistics objects with docs and links attached."""

    def __init__(self, criticality=None, docs=None, links=None):
        self._criticality = criticality or Criticality()
        self._docs = [TagStatDoc(*doc) for doc in docs or []]
        self._links = [TagStatLink(*link) for link in links or []]

    def get_stat(self, tag):
        critical = self._criticality.tag_is_critical(tag)
        non_critical = self._criticality.tag_is_non_critical(tag)
        return TagStat(tag, self.get_doc(tag), self.get_links(tag),
                       critical, non_critical)

    def get_combined_stats(self, combined=None):
        return [self.get_combined_stat(*comb) for comb in combined or []]

    def get_combined_stat(self, pattern, name=None):
        # Pattern itself is used as the name unless one is given.
        name = name or pattern
        return CombinedTagStat(pattern, name, self.get_doc(name),
                               self.get_links(name))

    def get_doc(self, tag):
        matching = [doc.text for doc in self._docs if doc.match(tag)]
        return ' & '.join(matching)

    def get_links(self, tag):
        return [link.get_link(tag) for link in self._links if link.match(tag)]
class TagStatDoc(object):
    """Associates documentation text with tags matching a pattern."""

    def __init__(self, pattern, doc):
        self._matcher = TagPatterns(pattern)
        self.text = doc

    def match(self, tag):
        return self._matcher.match(tag)
class TagStatLink(object):
    """Creates external links for tags matching a wildcard pattern.

    ``pattern`` may contain ``*`` and ``?`` wildcards. Each wildcard run
    becomes a capture group whose matched text replaces ``%1``, ``%2``, ...
    placeholders in both ``link`` and ``title``.
    """
    # Raw string: in a normal string '\*' and '\?' are invalid escape
    # sequences (deprecated/warned on modern Python) even though they
    # happen to pass through unchanged.
    _match_pattern_tokenizer = re.compile(r'(\*|\?+)')

    def __init__(self, pattern, link, title):
        self._regexp = self._get_match_regexp(pattern)
        self._link = link
        # Underscores in titles stand for spaces on the command line.
        self._title = title.replace('_', ' ')

    def match(self, tag):
        """Return ``True`` if ``tag`` matches the configured pattern."""
        return self._regexp.match(tag) is not None

    def get_link(self, tag):
        """Return ``(link, title)`` for a matching ``tag``, else ``None``."""
        match = self._regexp.match(tag)
        if not match:
            return None
        link, title = self._replace_groups(self._link, self._title, match)
        return link, title

    def _replace_groups(self, link, title, match):
        # Substitute %1, %2, ... with the captured wildcard values.
        for index, group in enumerate(match.groups()):
            placeholder = '%%%d' % (index + 1)
            link = link.replace(placeholder, group)
            title = title.replace(placeholder, group)
        return link, title

    def _get_match_regexp(self, pattern):
        # Anchor the whole tag; matching is case-insensitive.
        pattern = '^%s$' % ''.join(self._yield_match_pattern(pattern))
        return re.compile(pattern, re.IGNORECASE)

    def _yield_match_pattern(self, pattern):
        for token in self._match_pattern_tokenizer.split(pattern):
            if token.startswith('?'):
                # Each '?' matches exactly one character.
                yield '(%s)' % ('.' * len(token))
            elif token == '*':
                yield '(.*)'
            else:
                yield re.escape(token)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .stats import SuiteStat
class SuiteStatistics(object):
    """Container for suite statistics."""

    def __init__(self, suite):
        #: :class:`~robot.model.stats.SuiteStat` of this suite.
        self.stat = SuiteStat(suite)
        #: Child :class:`SuiteStatistics` objects.
        self.suites = []

    def visit(self, visitor):
        visitor.visit_suite_statistics(self)

    def __iter__(self):
        # Own stat first, then all child stats depth-first.
        stats = [self.stat]
        for child in self.suites:
            stats.extend(child)
        return iter(stats)
class SuiteStatisticsBuilder(object):
    """Builds a :class:`SuiteStatistics` tree while suites are visited."""

    def __init__(self, suite_stat_level):
        # -1 means statistics are collected from all suite levels.
        self._suite_stat_level = suite_stat_level
        self._stats_stack = []
        self.stats = None

    @property
    def current(self):
        if not self._stats_stack:
            return None
        return self._stats_stack[-1]

    def start_suite(self, suite):
        self._stats_stack.append(SuiteStatistics(suite))
        if self.stats is None:
            # The first started suite becomes the root statistics object.
            self.stats = self.current

    def add_test(self, test):
        self.current.stat.add_test(test)

    def end_suite(self):
        stats = self._stats_stack.pop()
        parent = self.current
        if parent:
            parent.stat.add_stat(stats.stat)
            if self._is_child_included():
                parent.suites.append(stats)

    def _is_child_included(self):
        return self._include_all_levels() or self._below_threshold()

    def _include_all_levels(self):
        return self._suite_stat_level == -1

    def _below_threshold(self):
        return len(self._stats_stack) < self._suite_stat_level
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .tags import TagPatterns
from .namepatterns import SuiteNamePatterns, TestNamePatterns
from .visitor import SuiteVisitor
class EmptySuiteRemover(SuiteVisitor):
    """Visitor that drops child suites containing no tests."""

    def end_suite(self, suite):
        suite.suites = [child for child in suite.suites if child.test_count]

    def visit_test(self, test):
        pass

    def visit_keyword(self, kw):
        pass
class Filter(EmptySuiteRemover):
    """Visitor selecting tests by suite name, test name and tags.

    Non-matching tests are removed from visited suites and, via
    :class:`EmptySuiteRemover`, child suites left without tests are dropped.
    """
    def __init__(self, include_suites=None, include_tests=None,
                 include_tags=None, exclude_tags=None):
        self.include_suites = include_suites
        self.include_tests = include_tests
        self.include_tags = include_tags
        self.exclude_tags = exclude_tags
    @setter
    def include_suites(self, suites):
        # Accept both ready-made pattern objects and raw names/lists.
        return SuiteNamePatterns(suites) \
            if not isinstance(suites, SuiteNamePatterns) else suites
    @setter
    def include_tests(self, tests):
        return TestNamePatterns(tests) \
            if not isinstance(tests, TestNamePatterns) else tests
    @setter
    def include_tags(self, tags):
        return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
    @setter
    def exclude_tags(self, tags):
        return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
    def start_suite(self, suite):
        # Returning False stops visiting this suite's children.
        if not self:
            return False
        if hasattr(suite, 'starttime'):
            # Filtering invalidates originally recorded execution times.
            suite.starttime = suite.endtime = None
        if self.include_suites:
            return self._filter_by_suite_name(suite)
        if self.include_tests:
            suite.tests = self._filter(suite, self._included_by_test_name)
        if self.include_tags:
            suite.tests = self._filter(suite, self._included_by_tags)
        if self.exclude_tags:
            suite.tests = self._filter(suite, self._not_excluded_by_tags)
        return bool(suite.suites)
    def _filter_by_suite_name(self, suite):
        if self.include_suites.match(suite.name, suite.longname):
            # Suite matched: apply the remaining criteria to its contents
            # with a new Filter that no longer filters by suite name.
            suite.visit(Filter(include_suites=[],
                               include_tests=self.include_tests,
                               include_tags=self.include_tags,
                               exclude_tags=self.exclude_tags))
            return False
        # No match at this level: drop direct tests, keep visiting children.
        suite.tests = []
        return True
    def _filter(self, suite, filter):
        return [t for t in suite.tests if filter(t)]
    def _included_by_test_name(self, test):
        return self.include_tests.match(test.name, test.longname)
    def _included_by_tags(self, test):
        return self.include_tags.match(test.tags)
    def _not_excluded_by_tags(self, test):
        return not self.exclude_tags.match(test.tags)
    def __nonzero__(self):
        # The filter is "empty" when no selection criteria are set (Python 2).
        return bool(self.include_suites or self.include_tests or
                    self.include_tags or self.exclude_tags)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .itemlist import ItemList
from .message import Message, Messages
from .modelobject import ModelObject
class Keyword(ModelObject):
    """Base model for single keyword."""
    __slots__ = ['parent', 'name', 'doc', 'args', 'type', 'timeout']
    KEYWORD_TYPE = 'kw'
    SETUP_TYPE = 'setup'
    TEARDOWN_TYPE = 'teardown'
    FOR_LOOP_TYPE = 'for'
    FOR_ITEM_TYPE = 'foritem'
    keyword_class = None
    message_class = Message
    def __init__(self, name='', doc='', args=(), type='kw', timeout=None):
        #: :class:`~.testsuite.TestSuite` or
        #: :class:`~.testcase.TestCase` that contains this keyword.
        self.parent = None
        #: Keyword name.
        self.name = name
        #: Keyword documentation.
        self.doc = doc
        #: Keyword arguments, a list of strings.
        self.args = args
        #: Keyword type, one of the ``*_TYPE`` constants above
        #: (``'kw'``, ``'setup'``, ``'teardown'``, ``'for'``, ``'foritem'``).
        self.type = type
        #: Keyword timeout.
        self.timeout = timeout
        #: Keyword messages, a list-like object of
        #: :class:`~robot.model.message.Message` instances.
        self.messages = None
        #: Child keywords, a list-like object of :class:`~.Keyword` instances.
        self.keywords = None
    @setter
    def keywords(self, keywords):
        # Child keywords use this same class unless `keyword_class` is set.
        return Keywords(self.keyword_class or self.__class__, self, keywords)
    @setter
    def messages(self, messages):
        return Messages(self.message_class, self, messages)
    @property
    def id(self):
        # Id like `s1-t1-k1`; plain `k1` when there is no parent.
        if not self.parent:
            return 'k1'
        return '%s-k%d' % (self.parent.id, self.parent.keywords.index(self)+1)
    def visit(self, visitor):
        """Visitor interface entry point: dispatches to ``visit_keyword``."""
        visitor.visit_keyword(self)
class Keywords(ItemList):
    """List-like container for :class:`Keyword` instances."""
    __slots__ = []

    def __init__(self, keyword_class=Keyword, parent=None, keywords=None):
        ItemList.__init__(self, keyword_class, {'parent': parent}, keywords)

    @property
    def setup(self):
        """Setup keyword, or ``None`` when the list does not start with one."""
        if self and self[0].type == 'setup':
            return self[0]
        return None

    @property
    def teardown(self):
        """Teardown keyword, or ``None`` when the list does not end with one."""
        if self and self[-1].type == 'teardown':
            return self[-1]
        return None

    @property
    def all(self):
        """All keywords, including possible setup and teardown."""
        return self

    @property
    def normal(self):
        """Keywords that are neither setup nor teardown."""
        for kw in self:
            if kw.type not in ('setup', 'teardown'):
                yield kw
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .itemlist import ItemList
from .keyword import Keyword, Keywords
from .modelobject import ModelObject
from .tags import Tags
class TestCase(ModelObject):
    """Base model for single test case."""
    __slots__ = ['parent', 'name', 'doc', 'timeout']
    keyword_class = Keyword

    def __init__(self, name='', doc='', tags=None, timeout=None):
        #: :class:`~.testsuite.TestSuite` that contains this test.
        self.parent = None
        #: Test case name.
        self.name = name
        #: Test case documentation.
        self.doc = doc
        #: Test case tags, exposed as a :class:`~.tags.Tags` object.
        self.tags = tags
        #: Test case timeout.
        self.timeout = timeout
        #: Keywords as a :class:`~.keyword.Keywords` object, including
        #: possible setup and teardown keywords.
        self.keywords = None

    @setter
    def tags(self, tags):
        return Tags(tags)

    @setter
    def keywords(self, keywords):
        return Keywords(self.keyword_class, self, keywords)

    @property
    def id(self):
        """Test id like ``s1-t3``; plain ``t1`` when there is no parent."""
        if self.parent:
            return '%s-t%d' % (self.parent.id, self.parent.tests.index(self) + 1)
        return 't1'

    @property
    def longname(self):
        """Test name prefixed with the longname of the parent suite."""
        if self.parent:
            return '%s.%s' % (self.parent.longname, self.name)
        return self.name

    def visit(self, visitor):
        visitor.visit_test(self)
class TestCases(ItemList):
    """List-like container for :class:`TestCase` instances."""
    __slots__ = []
    def __init__(self, test_class=TestCase, parent=None, tests=None):
        ItemList.__init__(self, test_class, {'parent': parent}, tests)
    def _check_type_and_set_attrs(self, test):
        ItemList._check_type_and_set_attrs(self, test)
        # Apply the parent suite's persistent visitors (e.g. tag setters
        # registered via `TestSuite.set_tags(persist=True)`) to added tests.
        for visitor in test.parent._visitors:
            test.visit(visitor)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils.setter import SetterAwareType
class ModelObject(object):
    """Base class for model objects providing common string representations."""
    __slots__ = []
    # Python 2 metaclass hook; presumably turns methods decorated with
    # @setter into properties — see robot.utils.setter (TODO confirm).
    __metaclass__ = SetterAwareType
    def __unicode__(self):
        return self.name
    def __str__(self):
        # Python 2: non-ASCII characters are replaced to keep str() safe.
        return unicode(self).encode('ASCII', 'replace')
    def __repr__(self):
        return repr(str(self))
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter
from .configurer import SuiteConfigurer
from .filter import Filter, EmptySuiteRemover
from .itemlist import ItemList
from .keyword import Keyword, Keywords
from .metadata import Metadata
from .modelobject import ModelObject
from .tagsetter import TagSetter
from .testcase import TestCase, TestCases
class TestSuite(ModelObject):
    """Base model for single suite.
    """
    __slots__ = ['parent', 'source', '_name', 'doc', '_my_visitors']
    test_class = TestCase
    keyword_class = Keyword
    def __init__(self, name='', doc='', metadata=None, source=None):
        #: Parent :class:`TestSuite` or `None`.
        self.parent = None
        #: Test suite name.
        self.name = name
        #: Test suite documentation.
        self.doc = doc
        #: Test suite metadata as a dictionary.
        self.metadata = metadata
        #: Path to the source file or directory.
        self.source = source
        #: A list of child :class:`~.testsuite.TestSuite` instances.
        self.suites = None
        #: A list of :class:`~.testcase.TestCase` instances.
        self.tests = None
        #: A list containing setup and teardown as
        #: :class:`~keyword.Keyword` instances.
        self.keywords = None
        # Visitors applied also to content added later; see `set_tags` with
        # `persist=True`. Combined with parent visitors in `_visitors`.
        self._my_visitors = []
    @property
    def _visitors(self):
        parent_visitors = self.parent._visitors if self.parent else []
        return self._my_visitors + parent_visitors
    def _get_name(self):
        # Name defaults to child suite names joined with ' & ' when not set.
        return self._name or ' & '.join(s.name for s in self.suites)
    def _set_name(self, name):
        self._name = name
    name = property(_get_name, _set_name)
    @setter
    def metadata(self, metadata):
        """Free test suite metadata as a dictionary."""
        return Metadata(metadata)
    @setter
    def suites(self, suites):
        """A list-like :class:`~.TestSuites` object containing child suites."""
        return TestSuites(self.__class__, self, suites)
    @setter
    def tests(self, tests):
        """A list-like :class:`~.testcase.TestCases` object containing tests."""
        return TestCases(self.test_class, self, tests)
    @setter
    def keywords(self, keywords):
        """A list-like :class:`~.keyword.Keywords` object with setup/teardown."""
        return Keywords(self.keyword_class, self, keywords)
    @property
    def id(self):
        """An automatically generated unique id.
        The root suite has id ``s1``, its children have ids ``s1-s1``,
        ``s1-s2``, ..., their children get ids ``s1-s1-s1``, ``s1-s1-s2``,
        ..., ``s1-s2-s1``, ..., and so on.
        """
        if not self.parent:
            return 's1'
        return '%s-s%d' % (self.parent.id, self.parent.suites.index(self)+1)
    @property
    def longname(self):
        """Suite name prefixed with all parent suite names."""
        if not self.parent:
            return self.name
        return '%s.%s' % (self.parent.longname, self.name)
    @property
    def test_count(self):
        """Number of the tests in this suite, recursively."""
        return len(self.tests) + sum(suite.test_count for suite in self.suites)
    def set_tags(self, add=None, remove=None, persist=False):
        """Add and/or remove specified tags to the tests in this suite.
        :param add: Tags to add as a list or, if adding only one,
            as a single string.
        :param remove: Tags to remove as a list or as a single string.
            Can be given as patterns where ``*`` and ``?`` work as wildcards.
        :param persist: Add/remove specified tags also to new tests added
            to this suite in the future.
        """
        setter = TagSetter(add, remove)
        self.visit(setter)
        if persist:
            # Kept in `_my_visitors` so TestCases re-applies it to new tests.
            self._my_visitors.append(setter)
    def filter(self, included_suites=None, included_tests=None,
               included_tags=None, excluded_tags=None):
        """Select test cases and remove others from this suite.
        Parameters have the same semantics as ``--suite``, ``--test``,
        ``--include``, and ``--exclude`` command line options. All of them
        can be given as a list of strings, or when selecting only one, as
        a single string.
        Child suites that contain no tests after filtering are automatically
        removed.
        Example::
            suite.filter(included_tests=['Test 1', '* Example'],
                         included_tags='priority-1')
        """
        self.visit(Filter(included_suites, included_tests,
                          included_tags, excluded_tags))
    def configure(self, **options):
        """Configure this suite by visiting a :class:`~.configurer.SuiteConfigurer`
        created with the given options."""
        self.visit(SuiteConfigurer(**options))
    def remove_empty_suites(self):
        """Removes all child suites not containing any tests, recursively."""
        self.visit(EmptySuiteRemover())
    def visit(self, visitor):
        """Visitor interface entry point: dispatches to ``visit_suite``."""
        visitor.visit_suite(self)
class TestSuites(ItemList):
    """List-like container for :class:`TestSuite` instances."""
    __slots__ = []
    def __init__(self, suite_class=TestSuite, parent=None, suites=None):
        ItemList.__init__(self, suite_class, {'parent': parent}, suites)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .stats import TotalStat
from .visitor import SuiteVisitor
class TotalStatistics(object):
    """Container for total statistics."""

    def __init__(self):
        #: :class:`~robot.model.stats.TotalStat` of critical tests.
        self.critical = TotalStat('Critical Tests')
        #: :class:`~robot.model.stats.TotalStat` of all tests.
        self.all = TotalStat('All Tests')

    def visit(self, visitor):
        visitor.visit_total_statistics(self)

    def __iter__(self):
        for stat in (self.critical, self.all):
            yield stat

    @property
    def message(self):
        """String representation of the statistics.

        For example::
            2 critical tests, 1 passed, 1 failed
            2 tests total, 1 passed, 1 failed
        """
        template = ('%d critical test%s, %d passed, %d failed\n'
                    '%d test%s total, %d passed, %d failed')
        return template % (self._get_counts(self.critical) +
                           self._get_counts(self.all))

    def _get_counts(self, stat):
        # Second item is the plural ending used in `message`.
        plural = 's' if stat.total != 1 else ''
        return stat.total, plural, stat.passed, stat.failed
class TotalStatisticsBuilder(SuiteVisitor):
    """Builds :class:`TotalStatistics` by visiting a suite or adding tests."""

    def __init__(self, suite=None):
        self.stats = TotalStatistics()
        if suite:
            suite.visit(self)

    def add_test(self, test):
        stats = self.stats
        stats.all.add_test(test)
        if test.critical:
            stats.critical.add_test(test)

    def visit_test(self, test):
        self.add_test(test)

    def visit_keyword(self, kw):
        # Keyword internals do not affect total statistics.
        pass
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import MultiMatcher
class _NamePatterns(object):
    """Base class for matching suite/test names against wildcard patterns."""
    def __init__(self, patterns=None):
        self._matcher = MultiMatcher(patterns, ignore=['_'])
    def match(self, name, longname=None):
        # Note: returns a truthy/falsy value, not necessarily a bool
        # (e.g. `longname` itself when it is falsy); callers use it in
        # boolean context only.
        return self._match(name) or longname and self._match_longname(longname)
    def _match(self, name):
        return self._matcher.match(name)
    def _match_longname(self, name):
        # Subclasses define how dot-separated long names are matched.
        raise NotImplementedError
    def __nonzero__(self):
        return bool(self._matcher)
    def __iter__(self):
        return iter(self._matcher)
class SuiteNamePatterns(_NamePatterns):
    """Matches suite names; long names match on any trailing suite path."""

    def _match_longname(self, name):
        candidate = name
        while '.' in candidate:
            if self._match(candidate):
                return True
            # Drop the topmost suite name and retry with the remainder.
            candidate = candidate.split('.', 1)[1]
        return False
class TestNamePatterns(_NamePatterns):
    def _match_longname(self, name):
        # Test long names are matched as-is, unlike suite long names.
        return self._match(name)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .totalstatistics import TotalStatisticsBuilder
from .suitestatistics import SuiteStatisticsBuilder
from .tagstatistics import TagStatisticsBuilder
from .visitor import SuiteVisitor
class Statistics(object):
    """Container for total, suite and tag statistics.
    Accepted parameters have the same semantics as the matching command line
    options.
    """
    def __init__(self, suite, suite_stat_level=-1, tag_stat_include=None,
                 tag_stat_exclude=None, tag_stat_combine=None, tag_doc=None,
                 tag_stat_link=None):
        # A single pass over the suite structure feeds all three builders.
        total_builder = TotalStatisticsBuilder()
        suite_builder = SuiteStatisticsBuilder(suite_stat_level)
        tag_builder = TagStatisticsBuilder(suite.criticality, tag_stat_include,
                                           tag_stat_exclude, tag_stat_combine,
                                           tag_doc, tag_stat_link)
        suite.visit(StatisticsBuilder(total_builder, suite_builder, tag_builder))
        #: Instance of :class:`~robot.model.totalstatistics.TotalStatistics`.
        self.total = total_builder.stats
        #: Instance of :class:`~robot.model.suitestatistics.SuiteStatistics`.
        self.suite = suite_builder.stats
        #: Instance of :class:`~robot.model.tagstatistics.TagStatistics`.
        self.tags = tag_builder.stats
    def visit(self, visitor):
        """Visitor interface entry point: dispatches to ``visit_statistics``."""
        visitor.visit_statistics(self)
class StatisticsBuilder(SuiteVisitor):
    """Feeds visited suites and tests to all three statistics builders."""

    def __init__(self, total_builder, suite_builder, tag_builder):
        self._total_builder = total_builder
        self._suite_builder = suite_builder
        self._tag_builder = tag_builder

    def start_suite(self, suite):
        self._suite_builder.start_suite(suite)

    def end_suite(self, suite):
        self._suite_builder.end_suite()

    def visit_test(self, test):
        for builder in (self._total_builder, self._suite_builder,
                        self._tag_builder):
            builder.add_test(test)

    def visit_keyword(self, kw):
        # Keyword internals are irrelevant for statistics; skip them.
        pass
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package with reusable and extendable model classes.
This package contains base classes, for example, for
:class:`test suites <robot.model.testsuite.TestSuite>`,
:class:`test cases <robot.model.testcase.TestCase>` and
:class:`keywords <robot.model.keyword.Keyword>`, and for other generic
functionality, such as :mod:`visitors <robot.model.visitor>`.
These classes are extended both in :mod:`robot.result` and :mod:`robot.running`
packages and used also elsewhere. There should, however, be no need to
externally use these classes directly, and they are not part of the public API.
This package is considered stable.
"""
from .configurer import SuiteConfigurer
from .testsuite import TestSuite
from .testcase import TestCase
from .keyword import Keyword
from .message import Message
from .tags import Tags, TagPatterns
from .criticality import Criticality
from .namepatterns import SuiteNamePatterns, TestNamePatterns
from .visitor import SuiteVisitor, SkipAllVisitor
from .totalstatistics import TotalStatisticsBuilder
from .statistics import Statistics
from .imports import Imports
from .itemlist import ItemList
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import elapsed_time_to_string, html_escape, normalize
from .tags import TagPatterns
class Stat(object):
    """Generic statistic object used for storing all the statistic values."""

    def __init__(self, name):
        #: Human readable identifier: `All Tests`/`Critical Tests` for total
        #: statistics, suite longname for suite statistics, or the tag name
        #: for tag statistics.
        self.name = name
        #: Number of passed tests.
        self.passed = 0
        #: Number of failed tests.
        self.failed = 0
        #: Number of milliseconds it took to execute.
        self.elapsed = 0
        self._norm_name = normalize(name, ignore='_')

    def get_attributes(self, include_label=False, include_elapsed=False,
                       exclude_empty=False, values_as_strings=False,
                       html_escape=False):
        """Return the statistic values as a dict, formatted per the flags."""
        attributes = {'pass': self.passed, 'fail': self.failed}
        attributes.update(self._get_custom_attrs())
        if include_label:
            attributes['label'] = self.name
        if include_elapsed:
            attributes['elapsed'] = elapsed_time_to_string(self.elapsed,
                                                           include_millis=False)
        # Empty values must be dropped before possible stringification.
        if exclude_empty:
            attributes = dict((k, v) for k, v in attributes.items() if v != '')
        if values_as_strings:
            attributes = dict((k, unicode(v)) for k, v in attributes.items())
        if html_escape:
            attributes = dict((k, self._html_escape(v))
                              for k, v in attributes.items())
        return attributes

    def _get_custom_attrs(self):
        # Hook for subclasses to add their own attributes.
        return {}

    def _html_escape(self, item):
        if isinstance(item, basestring):
            return html_escape(item)
        return item

    @property
    def total(self):
        return self.passed + self.failed

    def add_test(self, test):
        self._update_stats(test)
        self._update_elapsed(test)

    def _update_stats(self, test):
        if not test.passed:
            self.failed += 1
        else:
            self.passed += 1

    def _update_elapsed(self, test):
        self.elapsed += test.elapsedtime

    def __cmp__(self, other):
        # Python 2 ordering: stats sort by their normalized name.
        return cmp(self._norm_name, other._norm_name)

    def __nonzero__(self):
        # A stat is truthy when no tests have failed (Python 2 only).
        return not self.failed

    def visit(self, visitor):
        visitor.visit_stat(self)
class TotalStat(Stat):
    """Statistics of a whole test run (all tests or critical tests)."""

    #: Type identifier; always the string `total`.
    type = 'total'
class SuiteStat(Stat):
    """Statistic values of a single test suite."""

    #: Type identifier; always the string `suite`.
    type = 'suite'

    def __init__(self, suite):
        Stat.__init__(self, suite.longname)
        #: Identifier of the suite, e.g. `s1-s2`.
        self.id = suite.id
        #: Milliseconds it took to execute this suite, sub-suites included.
        self.elapsed = suite.elapsedtime
        self._name = suite.name

    def _get_custom_attrs(self):
        # Suite stats additionally expose the suite id and short name.
        return dict(id=self.id, name=self._name)

    def _update_elapsed(self, test):
        # Elapsed time comes directly from the suite in `__init__` and
        # must not be accumulated test by test.
        pass

    def add_stat(self, other):
        """Merge pass/fail counts from another stat into this one."""
        self.passed = self.passed + other.passed
        self.failed = self.failed + other.failed
class TagStat(Stat):
    """Stores statistic values for a single tag."""
    #: Always string `tag`.
    type = 'tag'
    def __init__(self, name, doc='', links=None, critical=False,
                 non_critical=False, combined=''):
        Stat.__init__(self, name)
        #: Documentation of tag as a string.
        self.doc = doc
        #: List of tuples in which the first value is the link URL and
        #: the second is the link title. An empty list by default.
        self.links = links or []
        #: ``True`` if tag is considered critical, ``False`` otherwise.
        self.critical = critical
        #: ``True`` if tag is considered non-critical, ``False`` otherwise.
        self.non_critical = non_critical
        #: Pattern as a string if the tag is combined,
        #: an empty string otherwise.
        self.combined = combined
    @property
    def info(self):
        """Returns additional information of the tag statistics
        are about. Either `critical`, `non-critical`, `combined` or an
        empty string.
        """
        if self.critical:
            return 'critical'
        if self.non_critical:
            return 'non-critical'
        if self.combined:
            return 'combined'
        return ''
    def _get_custom_attrs(self):
        # Tag specific attributes added on top of the generic pass/fail.
        return {'doc': self.doc, 'links': self._get_links_as_string(),
                'info': self.info, 'combined': self.combined}
    def _get_links_as_string(self):
        # Stored as (url, title) tuples but serialized as `title:url`
        # pairs joined with `:::`.
        return ':::'.join('%s:%s' % (title, url) for url, title in self.links)
    def __cmp__(self, other):
        # Sort order: critical tags first, then non-critical, then
        # combined, and finally alphabetically by normalized name.
        return cmp(other.critical, self.critical) \
            or cmp(other.non_critical, self.non_critical) \
            or cmp(bool(other.combined), bool(self.combined)) \
            or Stat.__cmp__(self, other)
class CombinedTagStat(TagStat):
    """Statistics of a tag combined from other tags with a pattern."""

    def __init__(self, pattern, name=None, doc='', links=None):
        # Fall back to the pattern itself when no explicit name is given.
        shown_name = name if name else pattern
        TagStat.__init__(self, shown_name, doc, links, combined=pattern)
        self._matcher = TagPatterns(pattern)

    def match(self, tags):
        """Tell whether the given tags match the combined pattern."""
        return self._matcher.match(tags)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import NormalizedDict
class Metadata(NormalizedDict):
    """Free suite metadata as a dictionary with normalized keys.

    Keys are matched via `NormalizedDict` with underscores ignored.
    """
    def __init__(self, initial=None):
        # `ignore='_'` makes keys match regardless of underscores.
        NormalizedDict.__init__(self, initial, ignore='_')
    def __unicode__(self):
        return u'{%s}' % ', '.join('%s: %s' % (k, self[k]) for k in self)
    def __str__(self):
        # Replace non-ASCII characters to keep `str` safe on Python 2.
        return unicode(self).encode('ASCII', 'replace')
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.utils import seq2str
from .itemlist import ItemList
class Import(object):
    """Represents a single library, resource or variable file import."""

    ALLOWED_TYPES = ('Library', 'Resource', 'Variables')

    def __init__(self, type, name, args=(), alias=None, source=None):
        self._validate_type(type)
        self.type = type
        self.name = name
        self.args = args
        self.alias = alias
        self.source = source

    def _validate_type(self, type):
        # Fail early on unknown import types.
        if type in self.ALLOWED_TYPES:
            return
        raise ValueError("Invalid import type '%s'. Should be one of %s."
                         % (type, seq2str(self.ALLOWED_TYPES, lastsep=' or ')))

    @property
    def directory(self):
        """Directory of the import source, or ``None`` when not known."""
        if not self.source:
            return None
        return self.source if os.path.isdir(self.source) \
            else os.path.dirname(self.source)

    def report_invalid_syntax(self, message, level='ERROR'):
        """Log a syntax error related to this import via the global LOGGER."""
        from robot.output import LOGGER
        text = "Error in file '%s': %s" % (self.source or '<unknown>', message)
        LOGGER.write(text, level)
class Imports(ItemList):
    """List of `Import` objects sharing the same source file."""

    def __init__(self, source, imports=None):
        ItemList.__init__(self, Import, {'source': source}, items=imports)

    def library(self, name, args=(), alias=None):
        """Create and add a library import."""
        self.create('Library', name, args, alias)

    def resource(self, path):
        """Create and add a resource file import."""
        self.create('Resource', path)

    def variables(self, path, args=()):
        """Create and add a variable file import."""
        self.create('Variables', path, args)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class SuiteVisitor(object):
    """Base class for visitors traversing the suite structure.

    A ``start_*`` method may return ``False`` to skip visiting the
    children of that model object; the matching ``end_*`` method is
    then not called either.
    """

    def visit_suite(self, suite):
        """Visit a suite and, unless vetoed, its keywords, sub-suites and tests."""
        if self.start_suite(suite) is False:
            return
        suite.keywords.visit(self)
        suite.suites.visit(self)
        suite.tests.visit(self)
        self.end_suite(suite)

    def start_suite(self, suite):
        """Called before a suite is visited. Return ``False`` to skip it."""
        pass

    def end_suite(self, suite):
        """Called after a suite has been visited."""
        pass

    def visit_test(self, test):
        """Visit a test and, unless vetoed, its keywords."""
        if self.start_test(test) is False:
            return
        test.keywords.visit(self)
        self.end_test(test)

    def start_test(self, test):
        """Called before a test is visited. Return ``False`` to skip it."""
        pass

    def end_test(self, test):
        """Called after a test has been visited."""
        pass

    def visit_keyword(self, kw):
        """Visit a keyword and, unless vetoed, its child keywords and messages."""
        if self.start_keyword(kw) is False:
            return
        kw.keywords.visit(self)
        kw.messages.visit(self)
        self.end_keyword(kw)

    def start_keyword(self, keyword):
        """Called before a keyword is visited. Return ``False`` to skip it."""
        pass

    def end_keyword(self, keyword):
        """Called after a keyword has been visited."""
        pass

    def visit_message(self, msg):
        """Visit a log message."""
        if self.start_message(msg) is False:
            return
        self.end_message(msg)

    def start_message(self, msg):
        """Called before a message is visited. Return ``False`` to skip it."""
        pass

    def end_message(self, msg):
        """Called after a message has been visited."""
        pass
class SkipAllVisitor(SuiteVisitor):
    """Travels a suite and its sub-suites without doing anything."""

    def visit_suite(self, suite):
        pass

    def visit_test(self, test):
        pass

    def visit_keyword(self, kw):
        pass

    def visit_message(self, msg):
        pass
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ItemList(object):
    """Generic, type checked list of model items.

    Items are stored internally in an immutable tuple; every mutation
    replaces the tuple. All added items must be instances of
    `item_class`, and `common_attrs` (a dict) is set on every item.
    """
    __slots__ = ['_item_class', '_common_attrs', '_items']

    def __init__(self, item_class, common_attrs=None, items=None):
        self._item_class = item_class
        self._common_attrs = common_attrs
        self._items = ()
        if items:
            self.extend(items)

    def create(self, *args, **kwargs):
        """Create a new item using `item_class` and append it. Returns the item."""
        return self.append(self._item_class(*args, **kwargs))

    def append(self, item):
        """Append `item` after validating its type. Returns the item."""
        self._check_type_and_set_attrs(item)
        self._items += (item,)
        return item

    def _check_type_and_set_attrs(self, item):
        if not isinstance(item, self._item_class):
            raise TypeError("Only '%s' objects accepted, got '%s'."
                            % (self._item_class.__name__, type(item).__name__))
        if self._common_attrs:
            for attr in self._common_attrs:
                setattr(item, attr, self._common_attrs[attr])

    def extend(self, items):
        """Append all given items after validating their types."""
        # Materialize `items` first so that one-shot iterables (e.g.
        # generators) are not exhausted by the validation loop below,
        # which would otherwise silently add nothing.
        items = tuple(items)
        for item in items:
            self._check_type_and_set_attrs(item)
        self._items += items

    if hasattr(tuple, 'index'):  # tuples got index method in Python 2.6
        def index(self, item):
            return self._items.index(item)
    else:
        def index(self, item):
            return list(self._items).index(item)

    def clear(self):
        """Remove all items."""
        self._items = ()

    def visit(self, visitor):
        """Let `visitor` visit every item in order."""
        for item in self:
            item.visit(visitor)

    def __iter__(self):
        return iter(self._items)

    def __getitem__(self, index):
        if isinstance(index, slice):
            raise TypeError("'%s' objects do not support slicing."
                            % type(self).__name__)
        return self._items[index]

    def __setitem__(self, index, item):
        if isinstance(index, slice):
            raise TypeError("'%s' objects do not support slicing."
                            % type(self).__name__)
        self._check_type_and_set_attrs(item)
        items = list(self._items)
        items[index] = item
        self._items = tuple(items)

    def __len__(self):
        return len(self._items)

    def __unicode__(self):
        return u'[%s]' % ', '.join(unicode(item) for item in self)

    def __str__(self):
        # Replace non-ASCII characters to keep `str` safe on Python 2.
        return unicode(self).encode('ASCII', 'replace')
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from os.path import basename, splitext
from robot.htmldata import HtmlFileWriter, ModelWriter, LOG, REPORT
from .jswriter import JsResultWriter, SplitLogWriter
class _LogReportWriter(object):
    """Common base for the log and report file writers."""
    def __init__(self, js_model):
        # The JS result model written into the generated HTML file.
        self._js_model = js_model
    def _write_file(self, path, config, template):
        # `path` may also be an already opened file-like object (used
        # as a unit test hook below).
        outfile = open(path, 'w') \
            if isinstance(path, basestring) else path # unit test hook
        with outfile:
            model_writer = RobotModelWriter(outfile, self._js_model, config)
            writer = HtmlFileWriter(outfile, model_writer)
            writer.write(template)
class LogWriter(_LogReportWriter):
    """Writes the log file and, when splitting is enabled, its split parts."""

    def write(self, path, config):
        """Write the log to `path` plus possible `<path>-<N>.js` parts."""
        self._write_file(path, config, LOG)
        if self._js_model.split_results:
            base = splitext(path)[0]
            self._write_split_logs(base)

    def _write_split_logs(self, base):
        # Part numbering is 1-based; enumerate accepts a start index
        # only in Python 2.6+, so increment manually.
        index = 0
        for keywords, strings in self._js_model.split_results:
            index += 1
            path = '%s-%d.js' % (base, index)
            self._write_split_log(index, keywords, strings, path)

    def _write_split_log(self, index, keywords, strings, path):
        with open(path, 'w') as outfile:
            SplitLogWriter(outfile).write(keywords, strings, index,
                                          basename(path))
class ReportWriter(_LogReportWriter):
    """Writes the report file."""

    def write(self, path, config):
        """Write the HTML report to `path` using the given configuration."""
        self._write_file(path, config, REPORT)
class RobotModelWriter(ModelWriter):
    """Writes the JS result model in place of the template's model line."""

    def __init__(self, output, model, config):
        self._output = output
        self._model = model
        self._config = config

    def write(self, line):
        # The matched template line itself is ignored; the whole JS
        # model is written in its place.
        JsResultWriter(self._output).write(self._model, self._config)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.htmldata import JsonWriter
class JsResultWriter(object):
    """Writes the JS result model into output as assignments to
    `window.output` and `window.settings`.

    Data is written inside `<script>` blocks; the JSON writer's
    separator closes the current block and opens a new one so that
    individual blocks stay reasonably sized.
    """
    _output_attr = 'window.output'
    _settings_attr = 'window.settings'
    _suite_key = 'suite'
    _strings_key = 'strings'
    def __init__(self, output,
                 start_block='<script type="text/javascript">\n',
                 end_block='</script>\n',
                 split_threshold=9500):
        writer = JsonWriter(output, separator=end_block+start_block)
        self._write = writer.write
        self._write_json = writer.write_json
        self._start_block = start_block
        self._end_block = end_block
        # Chunk size used both for the string list and by `SuiteWriter`.
        self._split_threshold = split_threshold
    def write(self, result, settings):
        """Write the whole result model and the given settings."""
        self._start_output_block()
        self._write_suite(result.suite)
        self._write_strings(result.strings)
        self._write_data(result.data)
        self._write_settings_and_end_output_block(settings)
    def _start_output_block(self):
        self._write(self._start_block, postfix='', separator=False)
        self._write('%s = {}' % self._output_attr)
    def _write_suite(self, suite):
        writer = SuiteWriter(self._write_json, self._split_threshold)
        writer.write(suite, self._output_var(self._suite_key))
    def _write_strings(self, strings):
        variable = self._output_var(self._strings_key)
        self._write('%s = []' % variable)
        # Strings are written in threshold sized chunks concatenated to
        # the array to avoid one huge literal.
        prefix = '%s = %s.concat(' % (variable, variable)
        postfix = ');\n'
        threshold = self._split_threshold
        for index in xrange(0, len(strings), threshold):
            self._write_json(prefix, strings[index:index+threshold], postfix)
    def _write_data(self, data):
        for key in data:
            self._write_json('%s = ' % self._output_var(key), data[key])
    def _write_settings_and_end_output_block(self, settings):
        self._write_json('%s = ' % self._settings_attr, settings,
                         separator=False)
        self._write(self._end_block, postfix='', separator=False)
    def _output_var(self, key):
        # E.g. 'window.output["suite"]'.
        return '%s["%s"]' % (self._output_attr, key)
class SuiteWriter(object):
    """Writes a suite model tuple, splitting oversized parts out first."""

    def __init__(self, write_json, split_threshold):
        self._write_json = write_json
        self._split_threshold = split_threshold

    def write(self, suite, variable):
        """Write `suite` into `variable`, extracting big parts separately."""
        mapping = {}
        self._write_parts_over_threshold(suite, mapping)
        self._write_json('%s = ' % variable, suite, mapping=mapping)

    def _write_parts_over_threshold(self, data, mapping):
        # Returns the number of items this data contributes when written
        # inline. A tuple written out as a separate part collapses to a
        # single reference via `mapping`.
        if not isinstance(data, tuple):
            return 1
        size = 1
        for item in data:
            size += self._write_parts_over_threshold(item, mapping)
        if size <= self._split_threshold:
            return size
        self._write_part(data, mapping)
        return 1

    def _write_part(self, data, mapping):
        name = 'window.sPart%d' % len(mapping)
        self._write_json('%s = ' % name, data, mapping=mapping)
        mapping[data] = name
class SplitLogWriter(object):
    """Writes the keywords and strings of a single split log part."""

    def __init__(self, output):
        self._writer = JsonWriter(output)

    def write(self, keywords, strings, index, notify):
        """Write part data and notify the page that the file has loaded."""
        write_json = self._writer.write_json
        write_json('window.keywords%d = ' % index, keywords)
        write_json('window.strings%d = ' % index, strings)
        self._writer.write('window.fileLoading.notify("%s")' % notify)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot.result.visitor import ResultVisitor
from robot.utils import XmlWriter
class XUnitWriter(object):
    """Writes execution results as an xUnit compatible XML file."""

    def __init__(self, execution_result, skip_noncritical):
        self._execution_result = execution_result
        self._skip_noncritical = skip_noncritical

    def write(self, output):
        """Serialize the execution result into `output` as UTF-8 XML."""
        xml_writer = XmlWriter(output, encoding='UTF-8')
        file_writer = XUnitFileWriter(xml_writer, self._skip_noncritical)
        self._execution_result.visit(file_writer)
class XUnitFileWriter(ResultVisitor):
    """Provides an xUnit-compatible result file.

    Attempts to adhere to the de facto schema guessed by Peter Reilly, see:
    http://marc.info/?l=ant-dev&m=123551933508682

    When `skip_noncritical` is enabled, non-critical tests are reported
    as skipped instead of passed/failed.
    """
    def __init__(self, xml_writer, skip_noncritical=False):
        self._writer = xml_writer
        self._root_suite = None
        self._skip_noncritical = skip_noncritical
    def start_suite(self, suite):
        # Only the root suite produces a <testsuite> element; its
        # statistics cover also the nested suites.
        if self._root_suite:
            return
        self._root_suite = suite
        tests, failures, skip = self._get_stats(suite.statistics)
        attrs = {'name': suite.name,
                 'tests': tests,
                 'errors': '0',
                 'failures': failures,
                 'skip': skip}
        self._writer.start('testsuite', attrs)
    def _get_stats(self, statistics):
        # Returns (total, failures, skip) as strings for XML attributes.
        if self._skip_noncritical:
            failures = statistics.critical.failed
            skip = statistics.all.total - statistics.critical.total
        else:
            failures = statistics.all.failed
            skip = 0
        return str(statistics.all.total), str(failures), str(skip)
    def end_suite(self, suite):
        if suite is self._root_suite:
            self._writer.end('testsuite')
    def visit_test(self, test):
        self._writer.start('testcase',
                           {'classname': test.parent.longname,
                            'name': test.name,
                            'time': self._time_as_seconds(test.elapsedtime)})
        if self._skip_noncritical and not test.critical:
            self._skip_test(test)
        elif not test.passed:
            self._fail_test(test)
        self._writer.end('testcase')
    def _skip_test(self, test):
        # The skip reason includes the original status and message.
        self._writer.element('skipped', '%s: %s' % (test.status, test.message)
                                        if test.message else test.status)
    def _fail_test(self, test):
        self._writer.element('failure', attrs={'message': test.message,
                                               'type': 'AssertionError'})
    def _time_as_seconds(self, millis):
        # Round milliseconds to the nearest full second.
        return str(int(round(millis, -3) / 1000))
    def visit_keyword(self, kw):
        # Keywords are not included in xUnit output.
        pass
    def visit_statistics(self, stats):
        # Statistics are not included in xUnit output.
        pass
    def visit_errors(self, errors):
        # Execution errors are not included in xUnit output.
        pass
    def end_result(self, result):
        self._writer.close()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.conf import RebotSettings
from robot.errors import DataError
from robot.output import LOGGER
from robot.result import ExecutionResult, Result
from robot.utils import unic
from .jsmodelbuilders import JsModelBuilder
from .logreportwriters import LogWriter, ReportWriter
from .xunitwriter import XUnitWriter
class ResultWriter(object):
    """A class to create log, report, output XML and xUnit files.

    :param sources: Either one :class:`~robot.result.executionresult.Result`
        object, or one or more paths to existing output XML files.

    By default writes ``report.html`` and ``log.html``, but no output XML
    or xUnit files. Custom file names can be given and results disabled
    or enabled using ``settings`` or ``options`` passed to the
    :meth:`write_results` method. The latter is typically more convenient::

        writer = ResultWriter(result)
        writer.write_results(report='custom.html', log=None, xunit='xunit.xml')
    """
    def __init__(self, *sources):
        self._sources = sources
    def write_results(self, settings=None, **options):
        """Writes results based on the given ``settings``  or ``options``.

        :param settings: :class:`~robot.conf.settings.RebotSettings` object
            to configure result writing.
        :param options: Used to construct new
            :class:`~robot.conf.settings.RebotSettings` object if ``settings``
            are not given.

        Returns the return code of the execution.
        """
        settings = settings or RebotSettings(options)
        results = Results(settings, *self._sources)
        if settings.output:
            self._write_output(results.result, settings.output)
        if settings.xunit:
            self._write_xunit(results.result, settings.xunit,
                              settings.xunit_skip_noncritical)
        if settings.log:
            config = dict(settings.log_config,
                          minLevel=results.js_result.min_level)
            self._write_log(results.js_result, settings.log, config)
        if settings.report:
            # Keywords and errors are not shown in the report, so they
            # can be dropped to save memory.
            results.js_result.remove_data_not_needed_in_report()
            self._write_report(results.js_result, settings.report,
                               settings.report_config)
        return results.return_code
    def _write_output(self, result, path):
        self._write('Output', result.save, path)
    def _write_xunit(self, result, path, skip_noncritical):
        self._write('XUnit', XUnitWriter(result, skip_noncritical).write, path)
    def _write_log(self, js_result, path, config):
        self._write('Log', LogWriter(js_result).write, path, config)
    def _write_report(self, js_result, path, config):
        self._write('Report', ReportWriter(js_result).write, path, config)
    def _write(self, name, writer, path, *args):
        # Common error handling: data and environment errors are logged,
        # not raised, so one failed file does not prevent the others.
        try:
            writer(path, *args)
        except DataError, err:
            LOGGER.error(unicode(err))
        except EnvironmentError, err:
            # `err.filename` can be different than `path` at least if reading
            # log/report templates or writing split log fails.
            # `unic` is needed due to http://bugs.jython.org/issue1825.
            LOGGER.error("Writing %s file '%s' failed: %s: %s" %
                         (name.lower(), path, err.strerror, unic(err.filename)))
        else:
            LOGGER.output_file(name, path)
class Results(object):
    """Lazily builds and caches the result models used by `ResultWriter`.

    If a single, ready-made `Result` object is given it is used as is;
    otherwise the result is parsed from the output XML sources on the
    first access of the `result` property.
    """
    def __init__(self, settings, *sources):
        self._settings = settings
        self._sources = sources
        if len(sources) == 1 and isinstance(sources[0], Result):
            self._result = sources[0]
            self._prune = False
            self.return_code = self._result.return_code
        else:
            self._result = None
            self._prune = True
            # Not known until the result has been parsed.
            self.return_code = -1
        self._js_result = None
    @property
    def result(self):
        """The `Result` object, parsed from sources on first access."""
        if self._result is None:
            # Keywords are needed only for the log and the output XML.
            include_keywords = bool(self._settings.log or self._settings.output)
            flattened = self._settings.flatten_keywords
            rerun_merge = self._settings.rerun_merge
            self._result = ExecutionResult(include_keywords=include_keywords,
                                           flattened_keywords=flattened,
                                           rerun_merge=rerun_merge,
                                           *self._sources)
            self._result.configure(self._settings.status_rc,
                                   self._settings.suite_config,
                                   self._settings.statistics_config)
            self.return_code = self._result.return_code
        return self._result
    @property
    def js_result(self):
        """The JS model built from `result`, cached after first access.

        When pruning is enabled, the intermediate `Result` object is
        released once the JS model has been built to save memory.
        """
        if self._js_result is None:
            builder = JsModelBuilder(log_path=self._settings.log,
                                     split_log=self._settings.split_log,
                                     prune_input_to_save_memory=self._prune)
            self._js_result = builder.build_from(self.result)
            if self._prune:
                self._result = None
        return self._js_result
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from robot.output import LEVELS
from .jsbuildingcontext import JsBuildingContext
from .jsexecutionresult import JsExecutionResult
class JsModelBuilder(object):
    """Builds the JavaScript result model from an execution result."""

    def __init__(self, log_path=None, split_log=False,
                 prune_input_to_save_memory=False):
        self._context = JsBuildingContext(log_path, split_log,
                                          prune_input_to_save_memory)

    def build_from(self, result_from_xml):
        """Convert the given execution result into a `JsExecutionResult`."""
        context = self._context
        # Statistics must be built first because building the suite may
        # prune the input they are based on.
        stats = StatisticsBuilder().build(result_from_xml.statistics)
        suite = SuiteBuilder(context).build(result_from_xml.suite)
        errors = ErrorsBuilder(context).build(result_from_xml.errors)
        return JsExecutionResult(statistics=stats,
                                 suite=suite,
                                 errors=errors,
                                 strings=context.strings,
                                 basemillis=context.basemillis,
                                 split_results=context.split_results,
                                 min_level=context.min_level)
class _Builder(object):
    """Common base for all JS model builders."""
    #: Status strings mapped to the integer codes used in the JS model.
    _statuses = {'FAIL': 0, 'PASS': 1, 'NOT_RUN': 2}
    def __init__(self, context):
        self._context = context
        # Bind frequently used context methods for brevity.
        self._string = self._context.string
        self._html = self._context.html
        self._timestamp = self._context.timestamp
    def _get_status(self, item):
        """Return a (status, starttime, elapsed[, message]) model tuple.

        The message element is added only when the item has a non-empty
        message. A `*HTML*` prefix means the message is already HTML and
        is stored without escaping.
        """
        model = (self._statuses[item.status],
                 self._timestamp(item.starttime),
                 item.elapsedtime)
        msg = getattr(item, 'message', '')
        if not msg:
            return model
        elif msg.startswith('*HTML*'):
            msg = self._string(msg[6:].lstrip(), escape=False)
        else:
            msg = self._string(msg)
        return model + (msg,)
    def _build_keywords(self, kws, split=False):
        # When splitting is active, the built keyword models go into a
        # separate part and only the part's index is returned.
        splitting = self._context.start_splitting_if_needed(split)
        model = tuple(self._build_keyword(k) for k in kws)
        return model if not splitting else self._context.end_splitting(model)
class SuiteBuilder(_Builder):
    """Builds the JS model of a suite, including its tests and sub-suites."""
    def __init__(self, context):
        _Builder.__init__(self, context)
        # Sub-suites are built recursively with this same builder.
        self._build_suite = self.build
        self._build_test = TestBuilder(context).build
        self._build_keyword = KeywordBuilder(context).build
    def build(self, suite):
        """Return the given suite as a model tuple."""
        with self._context.prune_input(suite.suites, suite.tests, suite.keywords):
            stats = self._get_statistics(suite)  # Must be done before pruning
            return (self._string(suite.name),
                    self._string(suite.source),
                    self._context.relative_source(suite.source),
                    self._html(suite.doc),
                    tuple(self._yield_metadata(suite)),
                    self._get_status(suite),
                    tuple(self._build_suite(s) for s in suite.suites),
                    tuple(self._build_test(t) for t in suite.tests),
                    tuple(self._build_keyword(k, split=True) for k in suite.keywords),
                    stats)
    def _yield_metadata(self, suite):
        # Metadata is flattened into alternating name, value entries.
        for name, value in suite.metadata.iteritems():
            yield self._string(name)
            yield self._html(value)
    def _get_statistics(self, suite):
        stats = suite.statistics  # Access property only once
        return (stats.all.total,
                stats.all.passed,
                stats.critical.total,
                stats.critical.passed)
class TestBuilder(_Builder):
    """Builds the JS model of a single test case."""

    def __init__(self, context):
        _Builder.__init__(self, context)
        self._build_keyword = KeywordBuilder(context).build

    def build(self, test):
        """Return the given test as a model tuple."""
        with self._context.prune_input(test.keywords):
            name = self._string(test.name)
            timeout = self._string(test.timeout)
            critical = int(test.critical)
            doc = self._html(test.doc)
            tags = tuple(self._string(t) for t in test.tags)
            status = self._get_status(test)
            keywords = self._build_keywords(test.keywords, split=True)
            return (name, timeout, critical, doc, tags, status, keywords)
class KeywordBuilder(_Builder):
    """Builds the JS model of a single keyword."""

    #: Keyword types mapped to the integer codes used in the JS model.
    _types = {'kw': 0, 'setup': 1, 'teardown': 2, 'for': 3, 'foritem': 4}

    def __init__(self, context):
        _Builder.__init__(self, context)
        # Child keywords are built recursively with this same builder.
        self._build_keyword = self.build
        self._build_message = MessageBuilder(context).build

    def build(self, kw, split=False):
        """Return the given keyword as a model tuple."""
        with self._context.prune_input(kw.messages, kw.keywords):
            kw_type = self._types[kw.type]
            name = self._string(kw.name)
            timeout = self._string(kw.timeout)
            doc = self._html(kw.doc)
            args = self._string(', '.join(kw.args))
            status = self._get_status(kw)
            children = self._build_keywords(kw.keywords, split)
            messages = tuple(self._build_message(m) for m in kw.messages)
            return (kw_type, name, timeout, doc, args, status, children,
                    messages)
class MessageBuilder(_Builder):
    """Builds the JS model of a single log message."""

    def build(self, msg):
        """Return the given message as a model tuple."""
        if msg.level == 'WARN':
            # Warnings get a link target so that they can be referenced.
            self._context.create_link_target(msg)
        self._context.message_level(msg.level)
        return self._build(msg)

    def _build(self, msg):
        timestamp = self._timestamp(msg.timestamp)
        level = LEVELS[msg.level]
        text = self._string(msg.html_message, escape=False)
        return (timestamp, level, text)
class StatisticsBuilder(object):
    """Builds the JS model of total, tag and suite statistics."""

    def build(self, statistics):
        """Return (total, tag, suite) statistics as attribute dicts."""
        return (self._build_stats(statistics.total),
                self._build_stats(statistics.tags),
                self._build_stats(statistics.suite))

    def _build_stats(self, stats):
        attrs = lambda stat: stat.get_attributes(include_label=True,
                                                 include_elapsed=True,
                                                 exclude_empty=True,
                                                 html_escape=True)
        return tuple(attrs(stat) for stat in stats)
class ErrorsBuilder(_Builder):
    """Builds the JS model of execution errors."""

    def __init__(self, context):
        _Builder.__init__(self, context)
        self._build_message = ErrorMessageBuilder(context).build

    def build(self, errors):
        """Return all execution error messages as a tuple of models."""
        with self._context.prune_input(errors.messages):
            return tuple(self._build_message(message) for message in errors)
class ErrorMessageBuilder(MessageBuilder):
    """Builds error messages, appending a link to the log when one exists."""

    def build(self, msg):
        model = self._build(msg)
        link = self._context.link(msg)
        if link is None:
            return model
        return model + (link,)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import os.path
from robot.output.loggerhelper import LEVELS
from robot.utils import (html_escape, html_format, get_link_path,
timestamp_to_secs)
from .stringcache import StringCache
class JsBuildingContext(object):
    """Shared state used while building the JS result model.

    Caches strings, tracks the timestamp base, split results, minimum
    message level and link targets created for warnings.
    """
    def __init__(self, log_path=None, split_log=False, prune_input=False):
        # log_path can be a custom object in unit tests
        self._log_dir = os.path.dirname(log_path) \
            if isinstance(log_path, basestring) else None
        self._split_log = split_log
        self._prune_input = prune_input
        # The top level cache is kept separately so split parts can use
        # their own cache (see `start_splitting_if_needed`).
        self._strings = self._top_level_strings = StringCache()
        # Milliseconds of the first seen timestamp; later timestamps are
        # stored relative to it.
        self.basemillis = None
        self.split_results = []
        self.min_level = 'NONE'
        self._msg_links = {}
    def string(self, string, escape=True):
        """Add `string` to the active cache and return its cache value."""
        if escape and string:
            if not isinstance(string, unicode):
                string = unicode(string)
            string = html_escape(string)
        return self._strings.add(string)
    def html(self, string):
        # Format as HTML first; the result is cached without escaping.
        return self.string(html_format(string), escape=False)
    def relative_source(self, source):
        # Path relative to the log dir, or '' when it cannot be resolved.
        rel_source = get_link_path(source, self._log_dir) \
            if self._log_dir and source and os.path.exists(source) else ''
        return self.string(rel_source)
    def timestamp(self, time):
        """Convert a timestamp into millis relative to `basemillis`."""
        if not time:
            return None
        # Must use `long` due to http://ironpython.codeplex.com/workitem/31549
        millis = long(round(timestamp_to_secs(time) * 1000))
        if self.basemillis is None:
            self.basemillis = millis
        return millis - self.basemillis
    def message_level(self, level):
        # Keep track of the minimum level of logged messages.
        if LEVELS[level] < LEVELS[self.min_level]:
            self.min_level = level
    def create_link_target(self, msg):
        # Store the id of the message's parent so that `link` can later
        # resolve the same message back to it.
        id = self._top_level_strings.add(msg.parent.id)
        self._msg_links[self._link_key(msg)] = id
    def link(self, msg):
        """Return the link target created for `msg`, or None."""
        return self._msg_links.get(self._link_key(msg))
    def _link_key(self, msg):
        # Messages are matched by content, level and timestamp.
        return (msg.message, msg.level, msg.timestamp)
    @property
    def strings(self):
        return self._strings.dump()
    def start_splitting_if_needed(self, split=False):
        # Activate a fresh string cache for a split part. Returns True
        # only when splitting was actually started.
        if self._split_log and split:
            self._strings = StringCache()
            return True
        return False
    def end_splitting(self, model):
        """Store the split part and return its 1-based index."""
        self.split_results.append((model, self.strings))
        self._strings = self._top_level_strings
        return len(self.split_results)
    @contextmanager
    def prune_input(self, *items):
        # When pruning is enabled, clear the given model containers
        # after the `with` body has run to save memory.
        yield
        if self._prune_input:
            for item in items:
                item.clear()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from robot import utils
from .stringcache import StringIndex
class JsExecutionResult(object):
    """Execution results in the form used by the JS model writers."""
    def __init__(self, suite, statistics, errors, strings, basemillis=None,
                 split_results=None, min_level=None):
        self.suite = suite
        self.strings = strings
        self.min_level = min_level
        self.data = self._get_data(statistics, errors, basemillis or 0)
        self.split_results = split_results or []
    def _get_data(self, statistics, errors, basemillis):
        # Generation time is stored relative to `basemillis`, like the
        # other timestamps in the model.
        gentime = time.localtime()
        return {
            'stats': statistics,
            'errors': errors,
            'baseMillis': basemillis,
            'generatedMillis': long(time.mktime(gentime) * 1000) - basemillis,
            'generatedTimestamp': utils.format_time(gentime, gmtsep=' ')
        }
    def remove_data_not_needed_in_report(self):
        """Strip errors, keywords and then unused strings from the model.

        The report does not show errors or keywords, so removing them
        shrinks the embedded data.
        """
        self.data.pop('errors')
        remover = _KeywordRemover()
        self.suite = remover.remove_keywords(self.suite)
        self.suite, self.strings \
            = remover.remove_unused_strings(self.suite, self.strings)
class _KeywordRemover(object):
def remove_keywords(self, suite):
return self._remove_keywords_from_suite(suite)
def _remove_keywords_from_suite(self, suite):
return suite[:6] + (self._remove_keywords_from_suites(suite[6]),
self._remove_keywords_from_tests(suite[7]),
(), suite[9])
def _remove_keywords_from_suites(self, suites):
return tuple(self._remove_keywords_from_suite(s) for s in suites)
def _remove_keywords_from_tests(self, tests):
return tuple(self._remove_keywords_from_test(t) for t in tests)
def _remove_keywords_from_test(self, test):
return test[:-1] + ((),)
def remove_unused_strings(self, model, strings):
used = set(self._get_used_indices(model))
remap = {}
strings = tuple(self._get_used_strings(strings, used, remap))
model = tuple(self._remap_string_indices(model, remap))
return model, strings
def _get_used_indices(self, model):
for item in model:
if isinstance(item, StringIndex):
yield item
elif isinstance(item, tuple):
for i in self._get_used_indices(item):
yield i
def _get_used_strings(self, strings, used_indices, remap):
offset = 0
for index, string in enumerate(strings):
if index in used_indices:
remap[index] = index - offset
yield string
else:
offset += 1
def _remap_string_indices(self, model, remap):
for item in model:
if isinstance(item, StringIndex):
yield remap[item]
elif isinstance(item, tuple):
yield tuple(self._remap_string_indices(item, remap))
else:
yield item
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements report, log, output XML, and xUnit file generation.
The public API of this package is the :class:`~.ResultWriter` class. It
can write result files based on XML output files on the file system,
as well as based on the result objects returned by
the :func:`~robot.result.resultbuilder.ExecutionResult` factory method or
an executed :class:`~robot.running.model.TestSuite`.
It is highly recommended to use the public API via the :mod:`robot.api` package.
This package is considered stable.
"""
from .resultwriter import ResultWriter
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import itemgetter
from robot.utils import compress_text
class StringIndex(long):
    """Integer subtype marking values that are indices to the string cache."""
    # Methods below are needed due to http://bugs.jython.org/issue1828
    def __str__(self):
        # Jython may append a trailing 'L' to long string representations.
        return long.__str__(self).rstrip('L')
    def __nonzero__(self):
        return bool(long(self))
class StringCache(object):
    """Interns strings and hands out indices, compressing long texts."""
    _compress_threshold = 80
    _use_compressed_threshold = 1.1
    _zero_index = StringIndex(0)

    def __init__(self):
        # Index 0 is reserved for the empty-string marker '*'.
        self._cache = {'*': self._zero_index}

    def add(self, text):
        """Return the cache index of *text*, adding it on first use."""
        if not text:
            return self._zero_index
        encoded = self._encode(text)
        if encoded not in self._cache:
            self._cache[encoded] = StringIndex(len(self._cache))
        return self._cache[encoded]

    def _encode(self, text):
        raw = self._raw(text)
        if raw in self._cache or len(raw) < self._compress_threshold:
            return raw
        compressed = compress_text(text)
        # Compression must pay off clearly before it is preferred.
        if len(compressed) * self._use_compressed_threshold < len(raw):
            return compressed
        return raw

    def _raw(self, text):
        # '*' prefix distinguishes raw texts from compressed ones.
        return '*' + text

    def dump(self):
        """Return all cached strings ordered by their indices."""
        ordered = sorted(self._cache.iteritems(), key=itemgetter(1))
        return tuple(text for text, _ in ordered)
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.output.xmllogger import XmlLogger
class OutputWriter(XmlLogger):
    """XmlLogger variant used by Rebot for writing combined output XML."""
    def __init__(self, output):
        XmlLogger.__init__(self, output, generator='Rebot')
    def start_message(self, msg):
        self._write_message(msg)
    def close(self):
        # Close the root element explicitly; XmlLogger's close assumes the
        # file was produced during execution.
        self._writer.end('robot')
        self._writer.close()
    def end_result(self, result):
        self.close()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os.path
import re
from robot.utils import HtmlWriter
from robot.version import get_full_version
from .template import HtmlTemplate
class HtmlFileWriter(object):
    """Writes an HTML result file, inlining JS/CSS and the JS model."""

    def __init__(self, output, model_writer):
        self._output = output
        self._model_writer = model_writer

    def write(self, template):
        """Copy *template* to the output, dispatching each line to the
        first writer that handles it."""
        writers = self._get_writers(os.path.dirname(template))
        for line in HtmlTemplate(template):
            self._write_line(line, writers)

    def _write_line(self, line, writers):
        # First matching writer wins; LineWriter matches everything.
        for writer in writers:
            if writer.handles(line):
                writer.write(line)
                return

    def _get_writers(self, base_dir):
        html_writer = HtmlWriter(self._output)
        return (self._model_writer,
                JsFileWriter(html_writer, base_dir),
                CssFileWriter(html_writer, base_dir),
                GeneratorWriter(html_writer),
                LineWriter(self._output))
class _Writer(object):
    """Base class for line writers; subclasses set `_handles_line`."""
    _handles_line = None
    def handles(self, line):
        # Subclasses must define `_handles_line` as a prefix string.
        return line.startswith(self._handles_line)
    def write(self, line):
        raise NotImplementedError
class ModelWriter(_Writer):
    # Handles the JS model placeholder; concrete writing is implemented
    # by subclasses elsewhere.
    _handles_line = '<!-- JS MODEL -->'
class LineWriter(_Writer):
    """Fallback writer that copies any line verbatim to the output."""

    def __init__(self, output):
        self._output = output

    def handles(self, line):
        # Matches everything and must thus be the last writer consulted.
        return True

    def write(self, line):
        self._output.write('%s\n' % line)
class GeneratorWriter(_Writer):
    """Rewrites the Generator meta tag with the current framework version."""
    _handles_line = '<meta name="Generator" content='
    def __init__(self, html_writer):
        self._html_writer = html_writer
    def write(self, line):
        version = get_full_version('Robot Framework')
        self._html_writer.start('meta', {'name': 'Generator', 'content': version})
class _InliningWriter(_Writer):
    """Base for writers that inline an external file into the HTML output."""

    def __init__(self, html_writer, base_dir):
        self._html_writer = html_writer
        self._base_dir = base_dir

    def _inline_file(self, filename, tag, attrs):
        """Write the content of *filename* inside a `tag` element."""
        writer = self._html_writer
        writer.start(tag, attrs)
        path = os.path.join(self._base_dir, filename)
        # Content is written as-is: it is trusted JS/CSS, not user data.
        for line in HtmlTemplate(path):
            writer.content(line, escape=False, newline=True)
        writer.end(tag)
class JsFileWriter(_InliningWriter):
    """Inlines referenced JavaScript files into `script` elements."""
    _handles_line = '<script type="text/javascript" src='
    _source_file = re.compile('src="([^"]+)"')

    def write(self, line):
        source = self._source_file.search(line).group(1)
        self._inline_file(source, 'script', {'type': 'text/javascript'})
class CssFileWriter(_InliningWriter):
    """Inlines referenced CSS files into `style` elements."""
    _handles_line = '<link rel="stylesheet"'
    _source_file = re.compile('href="([^"]+)"')
    _media_type = re.compile('media="([^"]+)"')

    def write(self, line):
        source = self._source_file.search(line).group(1)
        media = self._media_type.search(line).group(1)
        self._inline_file(source, 'style', {'type': 'text/css', 'media': media})
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class JsonWriter(object):
    """Writes JSON data to an output, optionally followed by a separator."""

    def __init__(self, output, separator=''):
        self._writer = JsonDumper(output)
        self._separator = separator

    def write_json(self, prefix, data, postfix=';\n', mapping=None,
                   separator=True):
        """Serialize *data* between the literal *prefix* and *postfix*."""
        write = self._writer.write
        write(prefix)
        self._writer.dump(data, mapping)
        write(postfix)
        self._write_separator(separator)

    def write(self, string, postfix=';\n', separator=True):
        """Write a literal *string* followed by *postfix*."""
        self._writer.write(string + postfix)
        self._write_separator(separator)

    def _write_separator(self, separator):
        if not (separator and self._separator):
            return
        self._writer.write(self._separator)
class JsonDumper(object):
    """Serializes Python data to JSON using a chain of type-specific dumpers."""

    def __init__(self, output):
        self._output = output
        # Order matters: the first dumper that handles the data wins.
        self._dumpers = (MappingDumper(self),
                         IntegerDumper(self),
                         TupleListDumper(self),
                         StringDumper(self),
                         NoneDumper(self),
                         DictDumper(self))

    def dump(self, data, mapping=None):
        """Write *data* as JSON; *mapping* may provide pre-dumped values."""
        for dumper in self._dumpers:
            if not dumper.handles(data, mapping):
                continue
            dumper.dump(data, mapping)
            return
        raise ValueError('Dumping %s not supported' % type(data))

    def write(self, data):
        self._output.write(data)
class _Dumper(object):
    """Base class for type-specific dumpers used by JsonDumper."""
    # Subclasses set this to the type(s) they can serialize.
    _handled_types = None
    def __init__(self, jsondumper):
        # Bound back-references allow recursive dumping of nested data.
        self._dump = jsondumper.dump
        self._write = jsondumper.write
    def handles(self, data, mapping):
        return isinstance(data, self._handled_types)
    def dump(self, data, mapping):
        raise NotImplementedError
class StringDumper(_Dumper):
    """Dumps strings, escaping JSON special characters."""
    _handled_types = basestring
    # '</' is escaped so embedded output cannot close an HTML script tag.
    _search_and_replace = [('\\', '\\\\'), ('"', '\\"'), ('\t', '\\t'),
                           ('\n', '\\n'), ('\r', '\\r'), ('</', '\\x3c/')]

    def dump(self, data, mapping):
        content = self._encode(data) if data else ''
        self._write('"%s"' % content)

    def _encode(self, string):
        for search, replace in self._search_and_replace:
            if search in string:
                string = string.replace(search, replace)
        return string.encode('UTF-8')
class IntegerDumper(_Dumper):
    # bool is a subclass of int; lower() turns 'True'/'False' into JSON
    # 'true'/'false' and is a no-op for plain numbers.
    _handled_types = (int, long, bool)
    def dump(self, data, mapping):
        self._write(str(data).lower())
class DictDumper(_Dumper):
    """Dumps dictionaries as JSON objects with keys in sorted order."""
    _handled_types = dict

    def dump(self, data, mapping):
        self._write('{')
        for index, key in enumerate(sorted(data)):
            # Comma before every entry except the first.
            if index:
                self._write(',')
            self._dump(key, mapping)
            self._write(':')
            self._dump(data[key], mapping)
        self._write('}')
class TupleListDumper(_Dumper):
    """Dumps tuples and lists as JSON arrays."""
    _handled_types = (tuple, list)

    def dump(self, data, mapping):
        self._write('[')
        for index, item in enumerate(data):
            # Comma before every item except the first.
            if index:
                self._write(',')
            self._dump(item, mapping)
        self._write(']')
class MappingDumper(_Dumper):
    """Dumps values that have a pre-serialized form in the given mapping."""
    def handles(self, data, mapping):
        # Unhashable data cannot be a mapping key; TypeError means 'no'.
        try:
            return mapping and data in mapping
        except TypeError:
            return False
    def dump(self, data, mapping):
        self._write(mapping[data])
class NoneDumper(_Dumper):
    """Dumps None as JSON null."""
    def handles(self, data, mapping):
        return data is None
    def dump(self, data, mapping):
        self._write('null')
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
from os.path import abspath, dirname, join, normpath
class HtmlTemplate(object):
    """Iterates over lines of a template under the `htmldata` directory.

    *filename* is given relative to the base directory using '/' as the
    path separator. Lines are yielded with trailing whitespace removed.
    """
    _base_dir = join(dirname(abspath(__file__)), '..', 'htmldata')

    def __init__(self, filename):
        self._path = normpath(join(self._base_dir, filename.replace('/', os.sep)))

    def __iter__(self):
        # 'template' instead of 'file' to avoid shadowing the builtin.
        with open(self._path) as template:
            for line in template:
                yield line.rstrip()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
from posixpath import normpath, join
from contextlib import contextmanager
from java.io import BufferedReader, InputStreamReader
# Works only when running from jar
from org.robotframework.RobotRunner import getResourceAsStream
class HtmlTemplate(object):
    """Reads template files from inside the standalone jar distribution."""
    _base_dir = '/Lib/robot/htmldata/'
    def __init__(self, filename):
        self._path = normpath(join(self._base_dir, filename.replace(os.sep, '/')))
    def __iter__(self):
        # Yields lines with trailing whitespace removed, mirroring the
        # file system based template reader.
        with self._reader as reader:
            line = reader.readLine()
            while line is not None:
                yield line.rstrip()
                line = reader.readLine()
    @property
    @contextmanager
    def _reader(self):
        # The resource stream comes from inside the jar; the reader is
        # always closed, also if iteration is abandoned midway.
        stream = getResourceAsStream(self._path)
        reader = BufferedReader(InputStreamReader(stream, 'UTF-8'))
        try:
            yield reader
        finally:
            reader.close()
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Package for writing output files in HTML format.
This package is considered stable but it is not part of the public API.
"""
from .htmlfilewriter import HtmlFileWriter, ModelWriter
from .jsonwriter import JsonWriter
# Paths to the HTML template files, relative to the `htmldata` directory.
LOG = 'rebot/log.html'
REPORT = 'rebot/report.html'
LIBDOC = 'libdoc/libdoc.html'
TESTDOC = 'testdoc/testdoc.html'
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from .jartemplate import HtmlTemplate
except ImportError:
from .normaltemplate import HtmlTemplate
| Python |
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fixml.py -- A tool to fix broken Robot Framework output files
Usage: fixml.py inpath outpath
This tool can fix Robot Framework output files that are not properly finished
or are missing elements from the middle. It should be possible to generate
reports and logs from the fixed output afterwards with the `rebot` tool.
The tool uses BeautifulSoup module which must be installed separately.
See http://www.crummy.com/software/BeautifulSoup for more information.
Additionally, the tool is only compatible with Robot Framework 2.1.3 or newer.
"""
from __future__ import with_statement
import sys
import os
try:
from BeautifulSoup import BeautifulStoneSoup
except ImportError:
raise ImportError('fixml.py requires BeautifulSoup to be installed: '
'http://www.crummy.com/software/BeautifulSoup/')
class Fixxxer(BeautifulStoneSoup):
    """BeautifulSoup parser that repairs broken Robot Framework output XML.

    NESTABLE_TAGS tells BeautifulSoup in which parents each tag may nest,
    so that missing end tags get closed at the correct level.
    """
    NESTABLE_TAGS = {'suite': ['robot','suite', 'statistics'],
                     'doc': ['suite', 'test', 'kw'],
                     'metadata': ['suite'],
                     'item': ['metadata'],
                     'status': ['suite', 'test', 'kw'],
                     'test': ['suite'],
                     'tags': ['test'],
                     'tag': ['tags'],
                     'kw': ['suite', 'test', 'kw'],
                     'msg': ['kw', 'errors'],
                     'arguments': ['kw'],
                     'arg': ['arguments'],
                     'statistics': ['robot'],
                     'errors': ['robot']}
    __close_on_open = None
    def unknown_starttag(self, name, attrs, selfClosing=0):
        # Mark the output as generated by this tool instead of Robot itself.
        if name == 'robot':
            attrs = [(key, value if key != 'generator' else 'fixml.py')
                     for key, value in attrs]
        # A teardown keyword belongs directly under a test or suite: close
        # any dangling open tags until such a parent is on top of the stack.
        if name == 'kw' and ('type', 'teardown') in attrs:
            while self.tagStack[-1].name not in ['test', 'suite']:
                self._popToTag(self.tagStack[-1].name)
        if self.__close_on_open:
            self._popToTag(self.__close_on_open)
            self.__close_on_open = None
        BeautifulStoneSoup.unknown_starttag(self, name, attrs, selfClosing)
    def unknown_endtag(self, name):
        BeautifulStoneSoup.unknown_endtag(self, name)
        # A closed <status> is the last child of its parent, so the parent
        # can be closed as soon as the next tag is opened.
        if name == 'status':
            self.__close_on_open = self.tagStack[-1].name
        else:
            self.__close_on_open = None
def main(inpath, outpath):
    """Parse *inpath* with Fixxxer and write the fixed XML to *outpath*.

    Returns *outpath* for convenience.
    """
    with open(inpath) as infile:
        with open(outpath, 'w') as outfile:
            fixed = Fixxxer(infile)
            outfile.write(str(fixed))
    return outpath
if __name__ == '__main__':
    # Wrong number of arguments makes 'main' raise TypeError; show usage.
    try:
        outpath = main(*sys.argv[1:])
    except TypeError:
        print __doc__
    else:
        print os.path.abspath(outpath)
| Python |
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robot Framework Start/End/Elapsed Time Reporter
Usage: times2csv.py input-xml [output-csv] [include-items]
This script reads start, end, and elapsed times from all suites, tests and/or
keywords from the given output file, and writes them into a file in
comma-separated-values (CSV) format. CSV files can then be further processed
with spreadsheet programs. If the CSV output file is not given, its name is
got from the input file by replacing the '.xml' extension with '.csv'.
'include-items' can be used for defining which items to process. Possible
values are 'suite', 'test' and 'keyword', and they can be combined to specify
multiple items e.g. like 'suite-test' or 'test-keyword'.
Examples:
times2csv.py output.xml
times2csv.py path/results.xml path2/times.csv
times2csv.py output.xml times.csv test
times2csv.py output.xml times.csv suite-test
"""
import sys
import os
import csv
from robot.result import ExecutionResult
from robot import utils
def process_file(inpath, outpath, items):
    """Read times from *inpath* and write them as CSV rows to *outpath*.

    *items* is a string naming the item types to include ('suite', 'test',
    'keyword', possibly combined like 'suite-test'), case-insensitively.
    """
    suite = ExecutionResult(inpath).suite
    outfile = open(outpath, 'wb')
    try:
        writer = csv.writer(outfile)
        writer.writerow(['TYPE', 'NAME', 'STATUS', 'START', 'END', 'ELAPSED',
                         'ELAPSED SECS'])
        process_suite(suite, writer, items.lower())
    finally:
        # Close the CSV file also if processing fails midway.
        outfile.close()
def process_suite(suite, writer, items, level=0):
    """Write rows for *suite* and recurse into its children."""
    if 'suite' in items:
        process_item(suite, writer, level, 'Suite')
    if 'keyword' in items:
        for keyword in suite.keywords:
            process_keyword(keyword, writer, level + 1)
    for child in suite.suites:
        process_suite(child, writer, items, level + 1)
    for test in suite.tests:
        process_test(test, writer, items, level + 1)
def process_test(test, writer, items, level):
    """Write a row for *test* and optionally for its keywords."""
    if 'test' in items:
        # Use the long name for context when suite rows are not included.
        process_item(test, writer, level, 'Test', 'suite' not in items)
    if 'keyword' in items:
        for keyword in test.keywords:
            process_keyword(keyword, writer, level + 1)
def process_keyword(kw, writer, level):
    """Write a row for *kw* and recurse into its child keywords."""
    if kw is None:
        return
    process_item(kw, writer, level, kw.type.capitalize())
    for child in kw.keywords:
        process_keyword(child, writer, level + 1)
def process_item(item, writer, level, item_type, long_name=False):
    """Write one CSV row with an ASCII-art indent showing nesting depth."""
    if level == 0:
        indent = ''
    else:
        indent = '| ' * (level - 1) + '|- '
    name = (item.longname if long_name else item.name).encode('UTF-8')
    elapsed = utils.elapsed_time_to_string(item.elapsedtime)
    writer.writerow([indent + item_type, name, item.status, item.starttime,
                     item.endtime, elapsed, item.elapsedtime / 1000.0])
if __name__ == '__main__':
    # Command line: input-xml [output-csv] [include-items]; see module doc.
    if not (2 <= len(sys.argv) <= 4) or sys.argv[1] in ('--help', '-h'):
        print __doc__
        sys.exit(1)
    inxml = sys.argv[1]
    try:
        outcsv = sys.argv[2]
    except IndexError:
        # Default output: input file name with a '.csv' extension.
        outcsv = os.path.splitext(inxml)[0] + '.csv'
    try:
        items = sys.argv[3]
    except IndexError:
        items = 'suite-test-keyword'
    process_file(inxml, outcsv, items)
    print os.path.abspath(outcsv)
| Python |
#!/usr/bin/env python
# tool2html.py -- Creates HTML version of given tool documentation
#
# First part of this file is Pygments configuration and actual
# documentation generation follows it.
#
# Pygments configuration
#
# This code is from 'external/rst-directive.py' file included in Pygments 0.9
# distribution. For more details see http://pygments.org/docs/rstdirective/
#
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.4 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
import os
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Docutils directive rendering a source code block with Pygments.

    If the content is a single non-empty line naming an existing file,
    that file's content is highlighted instead of the literal block.
    """
    try:
        lexer = get_lexer_by_name(arguments[0])
    except ValueError:
        # no lexer found - use the text one instead of an exception
        lexer = TextLexer()
    # take an arbitrary option if more than one is given
    formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
    filtered = [ line for line in content if line ]
    if len(filtered) == 1 and os.path.isfile(filtered[0]):
        # Open the file that was actually checked: 'content[0]' can be a
        # blank line when the block has surrounding empty lines. Also make
        # sure the file gets closed.
        source = open(filtered[0])
        try:
            content = source.read().splitlines()
        finally:
            source.close()
    parsed = highlight(u'\n'.join(content), lexer, formatter)
    return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
#
# Creating the documentation
#
# This code is based on rst2html.py distributed with docutils
#
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
import sys
from docutils.core import publish_cmdline
def create_tooldoc(tool_name):
    """Generate HTML documentation for the given tool using docutils.

    Reads '<tool>/doc/<tool>.txt', writes '<tool>/doc/<tool>.html' and
    prints the path of the created file.
    """
    description = 'HTML generator for Robot Framework Tool Documentation.'
    stylesheet_path = os.path.join(BASEDIR, '..', 'doc', 'userguide', 'src',
                                   'userguide.css')
    base_path = os.path.join(BASEDIR, tool_name, 'doc', tool_name)
    arguments = [ '--time', '--stylesheet-path', [stylesheet_path],
                  base_path+'.txt', base_path+'.html' ]
    publish_cmdline(writer_name='html', description=description, argv=arguments)
    print os.path.abspath(arguments[-1])
BASEDIR = os.path.dirname(os.path.abspath(__file__))
# Valid tools are sibling directories (no dot in the name) containing 'doc'.
VALID_TOOLS = [ name for name in os.listdir(BASEDIR) if '.' not in name ]
VALID_TOOLS = [ n for n in VALID_TOOLS if os.path.isdir(os.path.join(BASEDIR, n, 'doc')) ]
if __name__ == '__main__':
    # 'all' generates docs for every tool; otherwise a tool name is given.
    try:
        tool = sys.argv[1].lower()
        if tool == 'all':
            for name in sorted(VALID_TOOLS):
                create_tooldoc(name)
        elif tool in VALID_TOOLS:
            create_tooldoc(tool)
        else:
            # Unknown tool is reported the same way as a missing argument.
            raise IndexError
    except IndexError:
        print 'Usage: tool2html.py [ tool | all ]\n\nTools:'
        for tool in sorted(VALID_TOOLS):
            print ' %s' % tool
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '1.0'
import re
import sys
import inspect
import traceback
from StringIO import StringIO
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import Binary
try:
import signal
except ImportError:
signal = None
try:
from collections import Mapping
except ImportError:
Mapping = dict
BINARY = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F]')
NON_ASCII = re.compile('[\x80-\xff]')
class RobotRemoteServer(SimpleXMLRPCServer):
allow_reuse_address = True
_generic_exceptions = (AssertionError, RuntimeError, Exception)
_fatal_exceptions = (SystemExit, KeyboardInterrupt)
def __init__(self, library, host='127.0.0.1', port=8270, port_file=None,
allow_stop=True):
SimpleXMLRPCServer.__init__(self, (host, int(port)), logRequests=False)
self._library = library
self._allow_stop = allow_stop
self._shutdown = False
self._register_functions()
self._register_signal_handlers()
self._announce_start(port_file)
self.serve_forever()
def _register_functions(self):
self.register_function(self.get_keyword_names)
self.register_function(self.run_keyword)
self.register_function(self.get_keyword_arguments)
self.register_function(self.get_keyword_documentation)
self.register_function(self.stop_remote_server)
def _register_signal_handlers(self):
def stop_with_signal(signum, frame):
self._allow_stop = True
self.stop_remote_server()
raise KeyboardInterrupt
for name in 'SIGINT', 'SIGTERM', 'SIGHUP':
if hasattr(signal, name):
signal.signal(getattr(signal, name), stop_with_signal)
def _announce_start(self, port_file=None):
host, port = self.server_address
self._log('Robot Framework remote server at %s:%s starting.'
% (host, port))
if port_file:
pf = open(port_file, 'w')
try:
pf.write(str(port))
finally:
pf.close()
def serve_forever(self):
try:
while not self._shutdown:
self.handle_request()
except KeyboardInterrupt:
pass
def stop_remote_server(self):
prefix = 'Robot Framework remote server at %s:%s ' % self.server_address
if self._allow_stop:
self._log(prefix + 'stopping.')
self._shutdown = True
else:
self._log(prefix + 'does not allow stopping.', 'WARN')
return True
def get_keyword_names(self):
get_kw_names = getattr(self._library, 'get_keyword_names', None) or \
getattr(self._library, 'getKeywordNames', None)
if inspect.isroutine(get_kw_names):
names = get_kw_names()
else:
names = [attr for attr in dir(self._library) if attr[0] != '_'
and inspect.isroutine(getattr(self._library, attr))]
return names + ['stop_remote_server']
def run_keyword(self, name, args, kwargs=None):
args, kwargs = self._handle_binary_args(args, kwargs or {})
result = {'status': 'FAIL'}
self._intercept_std_streams()
try:
return_value = self._get_keyword(name)(*args, **kwargs)
except:
exc_type, exc_value, exc_tb = sys.exc_info()
self._add_to_result(result, 'error',
self._get_error_message(exc_type, exc_value))
self._add_to_result(result, 'traceback',
self._get_error_traceback(exc_tb))
self._add_to_result(result, 'continuable',
self._get_error_attribute(exc_value, 'CONTINUE'),
default=False)
self._add_to_result(result, 'fatal',
self._get_error_attribute(exc_value, 'EXIT'),
default=False)
else:
try:
self._add_to_result(result, 'return',
self._handle_return_value(return_value))
except:
exc_type, exc_value, _ = sys.exc_info()
self._add_to_result(result, 'error',
self._get_error_message(exc_type, exc_value))
else:
result['status'] = 'PASS'
self._add_to_result(result, 'output', self._restore_std_streams())
return result
def _handle_binary_args(self, args, kwargs):
args = [self._handle_binary_arg(a) for a in args]
kwargs = dict([(k, self._handle_binary_arg(v)) for k, v in kwargs.items()])
return args, kwargs
def _handle_binary_arg(self, arg):
if isinstance(arg, Binary):
return arg.data
return arg
def _add_to_result(self, result, key, value, default=''):
if value != default:
result[key] = value
def get_keyword_arguments(self, name):
kw = self._get_keyword(name)
if not kw:
return []
return self._arguments_from_kw(kw)
def _arguments_from_kw(self, kw):
args, varargs, kwargs, defaults = inspect.getargspec(kw)
if inspect.ismethod(kw):
args = args[1:] # drop 'self'
if defaults:
args, names = args[:-len(defaults)], args[-len(defaults):]
args += ['%s=%s' % (n, d) for n, d in zip(names, defaults)]
if varargs:
args.append('*%s' % varargs)
if kwargs:
args.append('**%s' % kwargs)
return args
def get_keyword_documentation(self, name):
if name == '__intro__':
return inspect.getdoc(self._library) or ''
if name == '__init__' and inspect.ismodule(self._library):
return ''
return inspect.getdoc(self._get_keyword(name)) or ''
def _get_keyword(self, name):
if name == 'stop_remote_server':
return self.stop_remote_server
kw = getattr(self._library, name, None)
if inspect.isroutine(kw):
return kw
return None
def _get_error_message(self, exc_type, exc_value):
if exc_type in self._fatal_exceptions:
self._restore_std_streams()
raise
name = exc_type.__name__
message = self._get_message_from_exception(exc_value)
if not message:
return name
if exc_type in self._generic_exceptions \
or getattr(exc_value, 'ROBOT_SUPPRESS_NAME', False):
return message
return '%s: %s' % (name, message)
def _get_message_from_exception(self, value):
# UnicodeError occurs below 2.6 and if message contains non-ASCII bytes
try:
msg = unicode(value)
except UnicodeError:
msg = ' '.join([self._str(a, handle_binary=False) for a in value.args])
return self._handle_binary_result(msg)
def _get_error_traceback(self, exc_tb):
# Latest entry originates from this class so it can be removed
entries = traceback.extract_tb(exc_tb)[1:]
trace = ''.join(traceback.format_list(entries))
return 'Traceback (most recent call last):\n' + trace
def _get_error_attribute(self, exc_value, name):
return bool(getattr(exc_value, 'ROBOT_%s_ON_FAILURE' % name, False))
def _handle_return_value(self, ret):
    """Convert a keyword return value into an XML-RPC compatible one.

    Strings may become Binary, numbers pass through as-is, mappings
    and iterables are converted recursively, and anything else is
    stringified.
    """
    if isinstance(ret, basestring):
        return self._handle_binary_result(ret)
    if isinstance(ret, (int, long, float)):
        return ret
    if isinstance(ret, Mapping):
        return dict([(self._str(key), self._handle_return_value(value))
                     for key, value in ret.items()])
    try:
        return [self._handle_return_value(item) for item in ret]
    except TypeError:
        # Not iterable -> fall back to a string representation.
        return self._str(ret)
def _handle_binary_result(self, result):
    """Wrap results containing binary data into an XML-RPC ``Binary``."""
    if not self._contains_binary(result):
        return result
    try:
        # Binary requires a byte string.
        result = str(result)
    except UnicodeError:
        raise ValueError("Cannot represent %r as binary." % result)
    return Binary(result)
def _contains_binary(self, result):
    """True if ``result`` holds bytes that need Binary wrapping."""
    # NOTE(review): 'cli' presumably denotes IronPython, where non-ASCII
    # byte strings seem to be excluded deliberately -- confirm.
    return (BINARY.search(result) or isinstance(result, str) and
            sys.platform != 'cli' and NON_ASCII.search(result))
def _str(self, item, handle_binary=True):
    """Coerce ``item`` to a string; None becomes ''.

    With ``handle_binary`` the result may be wrapped into Binary.
    """
    if item is None:
        return ''
    if not isinstance(item, basestring):
        item = unicode(item)
    if handle_binary:
        return self._handle_binary_result(item)
    return item
def _intercept_std_streams(self):
    """Redirect stdout/stderr into in-memory buffers for capture."""
    sys.stdout, sys.stderr = StringIO(), StringIO()
def _restore_std_streams(self):
    """Put the real stdout/stderr back and return the captured output.

    Captured stderr is appended after stdout; when both exist and
    stderr has no explicit log level marker it is logged at INFO level.
    """
    # Read buffers before swapping the real streams back in.
    stdout = sys.stdout.getvalue()
    stderr = sys.stderr.getvalue()
    close = [sys.stdout, sys.stderr]
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    for stream in close:
        stream.close()
    if stdout and stderr:
        if not stderr.startswith(('*TRACE*', '*DEBUG*', '*INFO*', '*HTML*',
                                  '*WARN*')):
            stderr = '*INFO* %s' % stderr
        if not stdout.endswith('\n'):
            stdout += '\n'
    return self._handle_binary_result(stdout + stderr)
def _log(self, msg, level=None):
    """Write ``msg`` to stdout, and also to the real stdout if the
    current one is an interception buffer."""
    if level:
        msg = '*%s* %s' % (level.upper(), msg)
    targets = [sys.stdout]
    if sys.__stdout__ is not sys.stdout:
        targets.append(sys.__stdout__)
    for target in targets:
        self._write_to_stream(msg, target)
def _write_to_stream(self, msg, stream):
stream.write(msg + '\n')
stream.flush()
| Python |
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robot Framework Debugfile Viewer
Usage: fileviewer.py [path]
This tool is mainly targeted for viewing Robot Framework debug files set
with '--debugfile' command line option when running test. The idea is to
provide a tool that has similar functionality as 'tail -f' command in
unixy systems.
The tool has a simple GUI which is updated every time the file opened into
it is updated. File can be given from command line or opened using 'Open'
button in the GUI.
"""
import os
import sys
import time
from FileDialog import LoadFileDialog
import Tkinter as Tk
class FileViewer:
    """Simple 'tail -f' style Tkinter viewer for Robot debug files."""

    def __init__(self, path=None):
        # Path may be omitted; a file can then be opened via the GUI.
        self._path = path is not None and os.path.abspath(path) or None
        self._file = self._open_file(path)
        self._root = self._create_root()
        self._create_components(self._root)
        # Id of the pending 'after' callback, used for pause/cancel.
        self._last_update_cmd = None
        self._update()

    def mainloop(self):
        """Enter the Tk event loop."""
        self._root.mainloop()

    def _create_root(self):
        root = Tk.Tk()
        root.title('Debug file viewer, v0.1')
        root.geometry('750x500+100+100')
        return root

    def _create_components(self, root):
        self._create_toolbar(root)
        self._create_statusbar(root)
        self._text_area = self._create_scrollable_text_area(root)

    def _create_statusbar(self, root):
        # Left label shows the file path, right one the status/mtime.
        statusbar = Tk.Frame(root)
        self._statusbar_left = Tk.Label(statusbar)
        self._statusbar_left.pack(side=Tk.LEFT)
        self._statusbar_right = Tk.Label(statusbar)
        self._statusbar_right.pack(side=Tk.RIGHT)
        statusbar.pack(side=Tk.BOTTOM, fill=Tk.X)

    def _create_toolbar(self, root):
        toolbar = Tk.Frame(root, width=65)
        self._create_button(toolbar, 'Open', self._open_file_dialog)
        self._create_button(toolbar, 'Clear', self._clear_text)
        self._create_button(toolbar, 'Exit', self._root.destroy)
        self._pause_cont_button = self._create_button(toolbar, 'Pause',
                                                      self._pause_or_cont, 25)
        toolbar.pack_propagate(0)
        toolbar.pack(side=Tk.RIGHT, fill=Tk.Y)

    def _create_button(self, parent, label, command, pady=2):
        button = Tk.Button(parent, text=label, command=command)
        button.pack(side=Tk.TOP, padx=2, pady=pady, fill=Tk.X)
        return button

    def _create_scrollable_text_area(self, root):
        scrollbar = Tk.Scrollbar(root)
        text = Tk.Text(root, yscrollcommand=scrollbar.set, font=("Courier", 9))
        scrollbar.config(command=text.yview)
        scrollbar.pack(side=Tk.RIGHT, fill=Tk.Y)
        text.pack(fill=Tk.BOTH, expand=1)
        return text

    def _pause_or_cont(self):
        """Toggle between pausing and continuing the periodic update."""
        if self._pause_cont_button['text'] == 'Pause':
            # Cancel the pending callback to stop polling the file.
            if self._last_update_cmd is not None:
                self._root.after_cancel(self._last_update_cmd)
            self._pause_cont_button.configure(text='Continue')
        else:
            self._pause_cont_button.configure(text='Pause')
            self._root.after(50, self._update)

    def _update(self):
        """Poll the file for new content and reschedule itself (50 ms)."""
        if self._file is None:
            # The file may appear later, keep trying to open it.
            self._file = self._open_file(self._path)
        if self._file is not None:
            try:
                # A shrunken file means it was rewritten; start over.
                if os.stat(self._path).st_size < self._last_file_size:
                    self._file.seek(0)
                    self._clear_text()
                self._text_area.insert(Tk.END, self._file.read())
                self._last_file_size = self._file.tell()
            except (OSError, IOError):
                # The file disappeared; drop it and clear the view.
                self._file = None
                self._clear_text()
        self._text_area.yview('moveto', '1.0')
        self._set_status_bar_text()
        self._last_update_cmd = self._root.after(50, self._update)

    def _clear_text(self):
        self._text_area.delete(1.0, Tk.END)

    def _set_status_bar_text(self):
        left, right = self._path, ''
        if self._path is None:
            left = 'No file opened'
        elif self._file is None:
            right = 'File does not exist'
        else:
            timetuple = time.localtime(os.stat(self._path).st_mtime)
            timestamp = '%d%02d%02d %02d:%02d:%02d' % timetuple[:6]
            right = 'File last modified: %s' % timestamp
        self._statusbar_left.configure(text=left)
        self._statusbar_right.configure(text=right)

    def _open_file(self, path):
        """Open ``path`` for reading, remembering its current size.

        Returns None when the path is not given or does not exist.
        """
        if path is not None and os.path.exists(path):
            self._last_file_size = os.stat(path).st_size
            return open(path)
        return None

    def _open_file_dialog(self):
        """Let the user pick a new file and restart the update cycle."""
        dialog = LoadFileDialog(self._root, title='Choose file to view')
        fname = dialog.go()
        if fname is None:
            return
        self._path = os.path.abspath(fname)
        if self._last_update_cmd is not None:
            self._root.after_cancel(self._last_update_cmd)
        if self._file is not None:
            self._file.close()
        self._file = self._open_file(self._path)
        self._clear_text()
        if self._pause_cont_button['text'] == 'Continue':
            # Was paused: this both resumes updating and resets the button.
            self._pause_or_cont()
        else:
            self._update()
if __name__ == '__main__':
    # Accept at most one optional path argument; otherwise print usage.
    if len(sys.argv) > 2 or '--help' in sys.argv:
        print __doc__
        sys.exit(1)
    app = FileViewer(*sys.argv[1:])
    app.mainloop()
| Python |
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Diff Tool for Robot Framework Outputs
Usage: robotdiff.py [options] input_files
This script compares two or more Robot Framework output files and creates a
report where possible differences between test case statuses in each file
are highlighted. Main use case is verifying that results from executing same
test cases in different environments are same. For example, it is possible to
test that new Robot Framework version does not affect test results. Another
usage is comparing earlier test results with newer ones to find out possible
status changes and added test cases.
Options:
-r --report file HTML report file (created from the input files).
Default is 'robotdiff.html'.
-n --name name * Custom names for test runs. If this option is used,
it must be used as many times as there are input
files. By default test run names are got from the
input file names.
-t --title title Title for the generated diff report. The default
title is 'Test Run Diff Report'.
-E --escape what:with * Escape certain characters which are problematic in
console. 'what' is the name of the character to
escape and 'with' is the string to escape it with.
Available characters to escape:
<--------------------ESCAPES------------------------>
Example:
--escape space:_ --title My_Fine_Diff_Report
-h -? --help Print this usage instruction.
Options that can be specified multiple times are marked with an asterisk (*).
Examples:
$ robotdiff.py output1.xml output2.xml output3.xml
$ robotdiff.py --name Env1 --name Env2 smoke1.xml smoke2.xml
"""
import sys
import os.path
from robot.utils import ArgumentParser, NormalizedDict, HtmlWriter
from robot.result import ExecutionResult
from robot.errors import DataError, Information
def main(args):
    """Entry point: parse args, read all outputs, write the diff report."""
    opts, paths = _process_args(args)
    results = DiffResults()
    for path, name in zip(paths, _get_names(opts['name'], paths)):
        try:
            results.add_output(path, name)
        except DataError, err:
            _exit(err, error=True)
    reporter = DiffReporter(opts['report'], opts['title'])
    reporter.report(results)
    _exit('Report: %s' % reporter.outpath)
def _process_args(cliargs):
    """Parse command line arguments; exits on --help or invalid input."""
    # At least two input files are required.
    ap = ArgumentParser(__doc__, arg_limits=(2, ))
    try:
        return ap.parse_args(cliargs)
    except Information, msg:
        # Help/usage requested: print it and exit cleanly.
        _exit(msg)
    except DataError, err:
        _exit(err, error=True)
def _get_names(names, paths):
if not names:
return [None] * len(paths)
if len(names) == len(paths):
return names
_exit('Different number of test run names (%d) and input files (%d).'
% (len(names), len(paths)), error=True)
def _exit(msg, error=False):
    """Print ``msg`` and terminate; exit code is 1 for errors, else 0."""
    print unicode(msg)
    if error:
        print "\nTry --help for usage information."
    sys.exit(int(error))
class DiffResults(object):
    """Collects suite/test statuses from multiple output files."""

    def __init__(self):
        self._stats = NormalizedDict()
        self.column_names = []

    @property
    def rows(self):
        """Yield a RowStatus per item, sorted by the item's long name."""
        items = sorted(self._stats.items())
        return (RowStatus(name, statuses) for name, statuses in items)

    def add_output(self, path, column=None):
        """Parse one output file and record its statuses as a new column."""
        self._add_suite(ExecutionResult(path).suite)
        self.column_names.append(column or path)
        # Items absent from this output get an N/A placeholder.
        for stats in self._stats.values():
            self._add_missing_statuses(stats)

    def _add_suite(self, suite):
        self._add_to_stats(suite)
        for child in suite.suites:
            self._add_suite(child)
        for test in suite.tests:
            self._add_to_stats(test)

    def _add_to_stats(self, item):
        stats = self._stats.setdefault(item.longname, [])
        # Pad items first seen in a later output with N/A columns.
        self._add_missing_statuses(stats)
        stats.append(ItemStatus(item))

    def _add_missing_statuses(self, stats):
        missing = len(self.column_names) - len(stats)
        stats.extend(MissingStatus() for _ in range(missing))
class MissingStatus(object):
    """Placeholder for an item missing from a particular test run."""
    name = 'N/A'
    status = 'not_available'
class ItemStatus(object):
    """Status of a single suite/test in one test run."""

    def __init__(self, item):
        # 'name' is the text shown in the report cell (e.g. PASS/FAIL);
        # 'status' doubles as a CSS class name.
        self.name = item.status
        self.status = item.status.lower()
class RowStatus(object):
    """One report row: an item's name and its status in every run."""

    def __init__(self, name, statuses):
        self.name = name
        self._statuses = statuses

    def __iter__(self):
        return iter(self._statuses)

    @property
    def status(self):
        """Overall row category, used as a CSS class in the report."""
        names = [stat.name for stat in self]
        passed = 'PASS' in names
        failed = 'FAIL' in names
        if passed and failed:
            return 'diff'
        if 'N/A' in names:
            return 'missing'
        return 'all_passed' if passed else 'all_failed'

    @property
    def explanation(self):
        """Human-readable tooltip for the row category."""
        texts = {'all_passed': 'All passed',
                 'all_failed': 'All failed',
                 'missing': 'Missing items',
                 'diff': 'Different statuses'}
        return texts[self.status]
class DiffReporter(object):
    """Writes the HTML diff report based on collected DiffResults."""

    def __init__(self, outpath=None, title=None):
        self.outpath = os.path.abspath(outpath or 'robotdiff.html')
        self._title = title or 'Test Run Diff Report'
        self._writer = HtmlWriter(open(self.outpath, 'w'))

    def report(self, results):
        """Write the complete report for a DiffResults instance."""
        self._start(results.column_names)
        for row in results.rows:
            self._write_row(row)
        self._end()

    def _start(self, columns):
        # Static boilerplate up to and including the opening <table>.
        self._writer.content(START_HTML % {'TITLE': self._title}, escape=False)
        self._writer.start('tr')
        self._writer.element('th', 'Name', {'class': 'col_name'})
        for column in columns:
            self._writer.element('th', column, {'class': 'col_status'})
        self._writer.end('tr')

    def _write_row(self, row):
        self._writer.start('tr')
        self._write_name(row)
        for status in row:
            self._write_status(status)
        self._writer.end('tr')

    def _write_name(self, row):
        attrs = {'class': 'col_name ' + row.status, 'title': row.explanation}
        self._writer.element('td', row.name, attrs)

    def _write_status(self, item):
        attrs = {'class': 'col_status ' + item.status}
        self._writer.element('td', item.name, attrs)

    def _end(self):
        for tag in 'table', 'body', 'html':
            self._writer.end(tag)
        self._writer.close()
# Static head of the report: styles plus the opening <table> tag.
# The [1:] strips the leading newline after the opening quotes.
START_HTML = '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta http-equiv="Expires" content="Mon, 20 Jan 2001 20:01:21 GMT">
<style media="all" type="text/css">
body {
background: white;
font-family: sans-serif;
font-size: 0.8em;
color: black;
}
table {
border: 1px solid black;
border-collapse: collapse;
empty-cells: show;
margin: 0px 1px;
}
th, td {
border: 1px solid black;
}
th {
background: #C6C6C6;
}
.col_name {
min-width: 25em;
font-weight: bold;
}
.col_status {
min-width: 6em;
text-align: center;
}
.pass {
color: #0F0;
}
.fail {
color: #F00;
}
.not_available {
color: #777;
}
.all_passed, .all_failed {
background: #0F0;
}
.missing {
background: #FF0;
}
.diff {
background: #F00;
}
</style>
<title>%(TITLE)s</title>
</head>
<body>
<h1>%(TITLE)s</h1>
<table>
'''[1:]
# Script entry point.
if __name__ == '__main__':
    main(sys.argv[1:])
| Python |
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""risto.py -- Robot Framework's Historical Reporting Tool
Version: <VERSION>
Usage: risto.py options input_files
or: risto.py options1 --- options2 --- optionsN --- input files
or: risto.py --argumentfile path
risto.py plots graphs about test execution history based on statistics
read from Robot Framework output files. Actual drawing is handled by
Matplotlib tool, which must be installed separately. More information
about it, including installation instructions, can be found from
http://matplotlib.sourceforge.net.
By default risto.py draws total, passed and failed graphs for critical
tests and all tests, but it is possible to omit some of these graphs
and also to add graphs by tags. Names of test rounds that are shown on
the x-axis are, by default, got from the paths to input files.
Alternatively, names can be got from the metadata of the top level
test suite (see Robot Framework's '--metadata' option for more details).
The graph is saved into a file specified with '--output' option, and the
output format is got from the file extension. Supported formats depend on the
installed Matplotlib back-ends, but at least PNG ought to be always available.
If the output file is omitted, the graph is opened into Matplotlib's image
viewer (which requires Matplotlib to be installed with some graphical
front-end).
It is possible to draw multiple graphs with different options at once. This
is done by separating different option groups with three or more hyphens
('---'). Note that in this case also paths to input files need to be
separated from last options similarly.
Instead of giving all options from the command line, it is possible to read
them from a file specified with '--argument' option. In an argument file
options and their possible argument are listed one per line, and option
groups are separated with lines of three or more hyphens. Empty lines and
lines starting with a hash mark ('#') are ignored.
Options:
-C --nocritical Do not plot graphs for critical tests.
-A --noall Do not plot graphs for all tests.
-T --nototals Do not plot total graphs.
-P --nopassed Do not plot passed graphs.
-F --nofailed Do not plot failed graphs.
-t --tag name * Add graphs for these tags. Name can contain '*' and
'?' as wildcards.
-o --output path Path to the image file to create. If not given, the
image is opened into Matplotlib's image viewer.
-i --title title Title of the graph. Underscores in the given title
are converted to spaces. By default there is no
title.
-w --width pixels Width of the image in pixels. Default is 800.
-h --height pixels Height of the image in pixels. Default is 400.
-f --font size Font size used for legends and labels. Default is 8.
-m --marker size Size of marked used with tag graphs. Default is 5.
-x --xticks num Maximum number of ticks in x-axis. Default is 15.
-n --namemeta name Name of the metadata of the top level test suite
where to get name of the test round. By default names
are got from paths to input files.
--- Used to group options when creating multiple images
at once.
--argumentfile path Read arguments from the specified file.
--verbose Verbose output.
--help Print this help.
--version Print version information.
Examples:
risto.py --output history.png output1.xml output2.xml output3.xml
risto.py --title My_Report --noall --namemeta Date --output out.png *.xml
risto.py --nopassed --tag smoke --tag iter-* results/*/output.xml
risto.py -CAP -t tag1 --- -CAP -t tag2 --- -CAP -t tag3 --- outputs/*.xml
risto.py --argumentfile arguments.txt
====[arguments.txt]===================
--title Overview
--output overview.png
----------------------
--nocritical
--noall
--nopassed
--tag smoke1
--title Smoke Tests
--output smoke.png
----------------------
path/to/*.xml
======================================
"""
from __future__ import with_statement
import os.path
import sys
import glob
# Matplotlib is an external dependency; fail early with install advice.
try:
    from matplotlib import pylab
    from matplotlib.lines import Line2D
    from matplotlib.font_manager import FontProperties
    from matplotlib.pyplot import get_current_fig_manager
except ImportError:
    # Fixed typo in the error message ('form' -> 'from').
    raise ImportError('Could not import Matplotlib modules. Install it from '
                      'http://matplotlib.sourceforge.net/')
try:
from robot import utils
from robot.errors import DataError, Information
except ImportError:
raise ImportError('Could not import Robot Framework modules. '
'Make sure you have Robot Framework installed.')
__version__ = '1.0.2'
class AllStatistics(object):
    """Statistics parsed from all given output files."""

    def __init__(self, paths, namemeta=None, verbose=False):
        self._stats = self._get_stats(paths, namemeta, verbose)
        self._tags = self._get_tags()

    def _get_stats(self, paths, namemeta, verbose):
        paths = self._glob_paths(paths)
        if namemeta:
            # Round names come from top-level suite metadata.
            return [Statistics(path, namemeta=namemeta, verbose=verbose)
                    for path in paths]
        return [Statistics(path, name, verbose=verbose)
                for path, name in zip(paths, self._get_names(paths))]

    def _glob_paths(self, orig):
        """Expand wildcard patterns in the given paths."""
        paths = []
        for path in orig:
            paths.extend(glob.glob(path))
        if not paths:
            raise DataError("No valid paths given.")
        return paths

    def _get_names(self, paths):
        """Derive round names from the last path token that differs."""
        paths = [os.path.splitext(os.path.abspath(p))[0] for p in paths]
        path_tokens = [p.replace('\\', '/').split('/') for p in paths]
        min_tokens = min(len(t) for t in path_tokens)
        # Walk backwards from the file name until tokens differ.
        index = -1
        while self._tokens_are_same_at_index(path_tokens, index):
            index -= 1
            if abs(index) > min_tokens:
                # Everything matched; fall back to the file name.
                index = -1
                break
        names = [tokens[index] for tokens in path_tokens]
        return [utils.printable_name(n, code_style=True) for n in names]

    def _tokens_are_same_at_index(self, token_list, index):
        # With a single path tokens are never 'same' so the name is
        # used as is.
        first = token_list[0][index]
        for tokens in token_list[1:]:
            if first != tokens[index]:
                return False
        return len(token_list) > 1

    def _get_tags(self):
        """Collect names of all tags appearing in any output, sorted."""
        stats = {}
        for statistics in self._stats:
            stats.update(statistics.tags)
        return [stat.name for stat in sorted(stats.values())]

    def plot(self, plotter):
        """Feed every statistics series to the given Plotter."""
        plotter.set_axis(self._stats)
        plotter.critical_tests([s.critical_tests for s in self._stats])
        plotter.all_tests([s.all_tests for s in self._stats])
        for tag in self._tags:
            plotter.tag([s[tag] for s in self._stats])
class Statistics(object):
    """Statistics parsed from one output XML file."""

    def __init__(self, path, name=None, namemeta=None, verbose=False):
        if verbose:
            print path
        root = utils.ET.ElementTree(file=path).getroot()
        self.name = self._get_name(name, namemeta, root)
        stats = root.find('statistics')
        # The 'total' element contains critical and all tests stats.
        crit_node, all_node = list(stats.find('total'))
        self.critical_tests = Stat(crit_node)
        self.all_tests = Stat(all_node)
        self.tags = dict((n.text, Stat(n)) for n in stats.find('tag'))

    def _get_name(self, name, namemeta, root):
        """Resolve the round name from the argument or suite metadata."""
        if namemeta is None:
            if name is None:
                raise TypeError("Either 'name' or 'namemeta' must be given")
            return name
        metadata = root.find('suite').find('metadata')
        if metadata:
            for item in metadata:
                # Metadata names are matched case-insensitively.
                if item.get('name','').lower() == namemeta.lower():
                    return item.text
        raise DataError("No metadata matching '%s' found" % namemeta)

    def __getitem__(self, name):
        """Tag stats by name; missing tags yield an empty placeholder."""
        try:
            return self.tags[name]
        except KeyError:
            return EmptyStat(name)
class Stat(object):
    """Pass/fail counts parsed from one statistics XML node."""

    def __init__(self, node):
        self.name = node.text
        self.passed = int(node.get('pass'))
        self.failed = int(node.get('fail'))
        self.total = self.passed + self.failed
        self.doc = node.get('doc', '')
        info = node.get('info', '')
        self.critical = info == 'critical'
        self.non_critical = info == 'non-critical'
        self.combined = info == 'combined'

    def __cmp__(self, other):
        # Sort order: critical, then non-critical, then combined,
        # finally alphabetically by name.
        if self.critical != other.critical:
            return self.critical is True and -1 or 1
        if self.non_critical != other.non_critical:
            return self.non_critical is True and -1 or 1
        if self.combined != other.combined:
            return self.combined is True and -1 or 1
        return cmp(self.name, other.name)
class EmptyStat(Stat):
    """Stat placeholder for a tag missing from a particular test run."""

    def __init__(self, name):
        # Deliberately skips Stat.__init__: there is no XML node.
        self.name = name
        self.passed = self.failed = self.total = 0
        self.doc = ''
        self.critical = self.non_critical = self.combined = False
class Legend(Line2D):
    """Data-less line whose only purpose is to provide a legend entry."""

    def __init__(self, **attrs):
        styles = {'color': '0.5', 'linestyle': '-', 'linewidth': 1}
        for key, value in attrs.items():
            styles[key] = value
        Line2D.__init__(self, [], [], **styles)
class Plotter(object):
    """Draws statistics series into a Matplotlib figure."""

    _total_color = 'blue'
    _pass_color = 'green'
    _fail_color = 'red'
    _background_color = '0.8'
    _xtick_rotation = 20
    # Default image geometry in pixels; converted to inches via _dpi.
    _default_width = 800
    _default_height = 400
    _default_font = 8
    _default_marker = 5
    _default_xticks = 15
    _dpi = 100
    _marker_symbols = 'o s D ^ v < > d p | + x 1 2 3 4 . ,'.split()

    def __init__(self, tags=None, critical=True, all=True, totals=True,
                 passed=True, failed=True, width=None, height=None, font=None,
                 marker=None, xticks=None):
        self._xtick_limit, self._font_size, self._marker_size, width, height \
            = self._get_sizes(xticks, font, marker, width, height)
        self._figure = pylab.figure(figsize=(width, height))
        self._axes = self._figure.add_axes([0.05, 0.15, 0.65, 0.70])
        # axes2 is used only for getting ytick labels also on right side
        self._axes2 = self._axes.twinx()
        self._axes2.set_xticklabels([], visible=False)
        self._tags = tags or []
        self._critical = critical
        self._all = all
        self._totals = totals
        self._passed = passed
        self._failed = failed
        self._legends = []
        self._markers = iter(self._marker_symbols)

    def _get_sizes(self, xticks, font, marker, width, height):
        """Validate size options and convert pixel dimensions to inches."""
        xticks = xticks or self._default_xticks
        font = font or self._default_font
        marker = marker or self._default_marker
        width = width or self._default_width
        height = height or self._default_height
        try:
            return (int(xticks), int(font), int(marker),
                    float(width)/self._dpi, float(height)/self._dpi)
        except ValueError:
            raise DataError('Width, height, font and xticks must be numbers.')

    def set_axis(self, stats):
        """Set x tick positions/labels and remember the drawing scale."""
        slen = len(stats)
        self._indexes = range(slen)
        self._xticks = self._get_xticks(slen, self._xtick_limit)
        self._axes.set_xticks(self._xticks)
        self._axes.set_xticklabels([stats[i].name for i in self._xticks],
                                   rotation=self._xtick_rotation,
                                   size=self._font_size)
        self._scale = (slen-1, max(s.all_tests.total for s in stats))

    def _get_xticks(self, slen, limit):
        """Choose at most ``limit`` evenly spread tick indexes."""
        if slen <= limit:
            return range(slen)
        interval, extra = divmod(slen-1, limit-1)  # 1 interval less than ticks
        if interval < 2:
            interval = 2
            limit, extra = divmod(slen-1, interval)
            limit += 1
        return [ self._get_index(i, interval, extra) for i in range(limit) ]

    def _get_index(self, count, interval, extra):
        # Spread the division remainder over the first ticks.
        if count < extra:
            extra = count
        return count * interval + extra

    def critical_tests(self, stats):
        """Plot the critical tests series (dashed line)."""
        if self._critical:
            line = {'linestyle': '--', 'linewidth': 1}
            self._plot(self._indexes, stats, **line)
            self._legends.append(Legend(label='critical tests', **line))

    def all_tests(self, stats):
        """Plot the all tests series (dotted line)."""
        if self._all:
            line = {'linestyle': ':', 'linewidth': 1}
            self._plot(self._indexes, stats, **line)
            self._legends.append(Legend(label='all tests', **line))

    def tag(self, stats):
        """Plot a tag series if its name matches the requested patterns."""
        if utils.MultiMatcher(self._tags).match(stats[0].name):
            line = {'linestyle': '-', 'linewidth': 0.3}
            mark = {'marker': self._get_marker(),
                    'markersize': self._marker_size}
            self._plot(self._indexes, stats, **line)
            # Markers only at tick positions to keep the plot readable.
            markers = [stats[index] for index in self._xticks]
            self._plot(self._xticks, markers, linestyle='', **mark)
            line.update(mark)
            label = self._get_tag_label(stats)
            self._legends.append(Legend(label=label, **line))

    def _get_tag_label(self, stats):
        label = stats[0].name
        # need to go through all stats because first can be EmptyStat
        for stat in stats:
            if stat.critical:
                return label + ' (critical)'
            if stat.non_critical:
                return label + ' (non-critical)'
        return label

    def _get_marker(self):
        """Return the next unused marker symbol, or '' when exhausted."""
        try:
            return self._markers.next()
        except StopIteration:
            return ''

    def _plot(self, xaxis, stats, **attrs):
        total, passed, failed \
            = zip(*[(s.total, s.passed, s.failed) for s in stats])
        if self._totals:
            self._axes.plot(xaxis, total, color=self._total_color, **attrs)
        if self._passed:
            self._axes.plot(xaxis, passed, color=self._pass_color, **attrs)
        if self._failed:
            self._axes.plot(xaxis, failed, color=self._fail_color, **attrs)

    def draw(self, output=None, title=None):
        """Finish the figure and save it, or open Matplotlib's viewer."""
        self._set_scale(self._axes)
        self._set_scale(self._axes2)
        self._set_legends(self._legends[:])
        if title:
            title = title.replace('_', ' ')
            self._axes.set_title(title, fontsize=self._font_size*1.8)
        if output:
            self._figure.savefig(output, facecolor=self._background_color,
                                 dpi=self._dpi)
        else:
            if not hasattr(self._figure, 'show'):
                raise DataError('Could not find a graphical front-end for '
                                'Matplotlib.')
            self._figure.show()
            if title:
                figman = get_current_fig_manager()
                figman.set_window_title(title)

    def _set_scale(self, axes):
        # Leave small margins around the plotted data area.
        width, height = self._scale
        axes.axis([-width*0.01, width*1.01, -height*0.04, height*1.04])

    def _set_legends(self, legends):
        # Section headers and color entries are added around the
        # style legends collected while plotting.
        legends.insert(0, Legend(label='Styles:', linestyle=''))
        legends.append(Legend(label='', linestyle=''))
        legends.append(Legend(label='Colors:', linestyle=''))
        if self._totals:
            legends.append(Legend(label='total', color=self._total_color))
        if self._passed:
            legends.append(Legend(label='passed', color=self._pass_color))
        if self._failed:
            legends.append(Legend(label='failed', color=self._fail_color))
        labels = [l.get_label() for l in legends]
        self._figure.legend(legends, labels, loc='center right',
                            numpoints=3, borderpad=0.1,
                            prop=FontProperties(size=self._font_size))
class Ristopy(object):
    """Command line front-end: parses options and drives plotting."""

    def __init__(self):
        self._arg_parser = utils.ArgumentParser(__doc__, version=__version__)

    def main(self, args):
        args = self._process_possible_argument_file(args)
        try:
            opt_groups, paths = self._split_to_option_groups_and_paths(args)
        except ValueError:
            # No '---' separators: a single option group.
            viewer_open = self._plot_one_graph(args)
        else:
            viewer_open = self._plot_multiple_graphs(opt_groups, paths)
        if viewer_open:
            # Keep interactive viewer(s) alive until the user is done.
            try:
                raw_input('Press enter to exit.\n')
            except (EOFError, KeyboardInterrupt):
                pass
        pylab.close('all')

    def _plot_one_graph(self, args):
        opts, paths = self._arg_parser.parse_args(args)
        stats = AllStatistics(paths, opts['namemeta'], opts['verbose'])
        output = self._plot(stats, opts)
        # No output file means the image viewer was opened.
        return output is None

    def _plot_multiple_graphs(self, opt_groups, paths):
        viewer_open = False
        # Input files are parsed only once for all option groups.
        stats = AllStatistics(paths, opt_groups[0]['namemeta'],
                              opt_groups[0]['verbose'])
        for opts in opt_groups:
            output = self._plot(stats, opts)
            viewer_open = output is None or viewer_open
        return viewer_open

    def _plot(self, stats, opts):
        """Plot one graph; returns the output path option (may be None)."""
        plotter = Plotter(opts['tag'], not opts['nocritical'],
                          not opts['noall'], not opts['nototals'],
                          not opts['nopassed'], not opts['nofailed'],
                          opts['width'], opts['height'], opts['font'],
                          opts['marker'], opts['xticks'])
        stats.plot(plotter)
        plotter.draw(opts['output'], opts['title'])
        if opts['output']:
            print os.path.abspath(opts['output'])
        return opts['output']

    def _process_possible_argument_file(self, args):
        """Expand '--argumentfile path' into the arguments read from it."""
        try:
            index = args.index('--argumentfile')
        except ValueError:
            return args
        path = args[index+1]
        try:
            lines = open(path).readlines()
        except IOError:
            raise DataError("Invalid argument file '%s'" % path)
        fargs = []
        for line in lines:
            line = line.strip()
            if line == '' or line.startswith('#'):
                continue
            elif line.startswith('-'):
                # Option and its possible value become separate arguments.
                fargs.extend(line.split(' ', 1))
            else:
                fargs.append(line)
        args[index:index+2] = fargs
        return args

    def _split_to_option_groups_and_paths(self, args):
        """Split args at '---' separators; raises ValueError if none."""
        opt_groups = []
        current = []
        for arg in args:
            # A separator is three or more hyphens and nothing else.
            if arg.replace('-', '') == '' and len(arg) >= 3:
                opts = self._arg_parser.parse_args(current)[0]
                opt_groups.append(opts)
                current = []
            else:
                current.append(arg)
        if opt_groups:
            return opt_groups, current
        raise ValueError("Nothing to split")
if __name__ == '__main__':
    try:
        Ristopy().main(sys.argv[1:])
    except Information, msg:
        # --help / --version output.
        print str(msg)
    except DataError, err:
        print '%s\n\nTry --help for usage information.' % err
| Python |
#!/usr/bin/env python
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robot Framework Test Status Checker
Usage: statuschecker.py infile [outfile]
This tool processes Robot Framework output XML files and checks that test case
statuses and messages are as expected. Main use case is post-processing output
files got when testing Robot Framework test libraries using Robot Framework
itself.
If output file is not given, the input file is considered to be also output
file and it is edited in place.
By default all test cases are expected to 'PASS' and have no message. Changing
the expected status to 'FAIL' is done by having word 'FAIL' (in uppercase)
somewhere in the test case documentation. Expected error message must then be
given after 'FAIL'. Error message can also be specified as a regular
expression by prefixing it with string 'REGEXP:'. Testing only the beginning
of the message is possible with 'STARTS:' prefix.
This tool also allows testing the created log messages. They are specified
using a syntax 'LOG x.y:z LEVEL Actual message', which is described in
detail in the tool documentation.
"""
import re
from robot.result import ExecutionResult
def process_output(inpath, outpath=None):
    """Check statuses in ``inpath`` and save the processed result.

    The input file is edited in place when ``outpath`` is omitted.
    Returns the result's return code.
    """
    result = ExecutionResult(inpath)
    _process_suite(result.suite)
    result.save(outpath)
    return result.return_code
def _process_suite(suite):
for subsuite in suite.suites:
_process_suite(subsuite)
for test in suite.tests:
_process_test(test)
def _process_test(test):
    """Validate one test against expectations parsed from its docs."""
    expected = _Expected(test.doc)
    _check_status(test, expected)
    # Log checks only make sense when the status check passed.
    if test.status == 'PASS':
        _check_logs(test, expected)
def _check_status(test, exp):
    """Compare the test's actual status/message to the expected ones.

    Mutates ``test`` in place: mismatches turn it FAIL with an
    explanation, and an expected failure turns it PASS.
    """
    if exp.status != test.status:
        test.status = 'FAIL'
        if exp.status == 'PASS':
            test.message = ("Test was expected to PASS but it FAILED. "
                            "Error message:\n") + test.message
        else:
            test.message = ("Test was expected to FAIL but it PASSED. "
                            "Expected message:\n") + exp.message
    elif not _message_matches(test.message, exp.message):
        test.status = 'FAIL'
        test.message = ("Wrong error message.\n\nExpected:\n%s\n\nActual:\n%s\n"
                        % (exp.message, test.message))
    elif test.status == 'FAIL':
        # The original test failed exactly as expected.
        test.status = 'PASS'
        test.message = 'Original test failed as expected.'
def _message_matches(actual, expected):
if actual == expected:
return True
if expected.startswith('REGEXP:'):
pattern = '^%s$' % expected.replace('REGEXP:', '', 1).strip()
if re.match(pattern, actual, re.DOTALL):
return True
if expected.startswith('STARTS:'):
start = expected.replace('STARTS:', '', 1).strip()
if actual.startswith(start):
return True
return False
def _check_logs(test, exp):
    # Verify all expected log messages parsed from the test documentation.
    for kw_indices, msg_index, level, message in exp.logs:
        try:
            # Walk the keyword path, e.g. indices [0, 1] -> kw 1, sub-kw 2.
            kw = test.keywords[kw_indices[0]]
            for index in kw_indices[1:]:
                kw = kw.keywords[index]
        except IndexError:
            # Report indices 1-based, as they are written in the doc.
            indices = '.'.join(str(i+1) for i in kw_indices)
            test.status = 'FAIL'
            test.message = ("Test '%s' does not have keyword with index '%s'"
                            % (test.name, indices))
            return
        if len(kw.messages) <= msg_index:
            # Expected message 'NONE' means the message slot must not exist.
            if message != 'NONE':
                test.status = 'FAIL'
                test.message = ("Keyword '%s' should have had at least %d "
                                "messages" % (kw.name, msg_index+1))
        else:
            if _check_log_level(level, test, kw, msg_index):
                _check_log_message(message, test, kw, msg_index)
def _check_log_level(expected, test, kw, index):
actual = kw.messages[index].level
if actual == expected:
return True
test.status = 'FAIL'
test.message = ("Wrong level for message %d of keyword '%s'.\n\n"
"Expected: %s\nActual: %s.\n%s"
% (index+1, kw.name, expected,
actual, kw.messages[index].message))
return False
def _check_log_message(expected, test, kw, index):
    """Verify the content of message `index` of `kw`; fail `test` on mismatch."""
    msg = kw.messages[index].message.strip()
    if _message_matches(msg, expected):
        return True
    test.status = 'FAIL'
    test.message = ("Wrong content for message %d of keyword '%s'.\n\n"
                    "Expected:\n%s\n\nActual:\n%s"
                    % (index + 1, kw.name, expected, msg))
    return False
class _Expected:
def __init__(self, doc):
self.status, self.message = self._get_status_and_message(doc)
self.logs = self._get_logs(doc)
def _get_status_and_message(self, doc):
if 'FAIL' in doc:
return 'FAIL', doc.split('FAIL', 1)[1].split('LOG', 1)[0].strip()
return 'PASS', ''
def _get_logs(self, doc):
logs = []
for item in doc.split('LOG')[1:]:
index_str, msg_str = item.strip().split(' ', 1)
kw_indices, msg_index = self._get_indices(index_str)
level, message = self._get_log_message(msg_str)
logs.append((kw_indices, msg_index, level, message))
return logs
def _get_indices(self, index_str):
try:
kw_indices, msg_index = index_str.split(':')
except ValueError:
kw_indices, msg_index = index_str, '1'
kw_indices = [int(index) - 1 for index in kw_indices.split('.')]
return kw_indices, int(msg_index) - 1
def _get_log_message(self, msg_str):
try:
level, message = msg_str.split(' ', 1)
if level not in ['TRACE', 'DEBUG', 'INFO', 'WARN', 'FAIL']:
raise ValueError
except ValueError:
level, message = 'INFO', msg_str
return level, message
if __name__=='__main__':
    import sys
    import os
    # Expected usage: script input.xml [output.xml]
    if not 2 <= len(sys.argv) <= 3 or '--help' in sys.argv:
        print __doc__
        sys.exit(1)
    infile = sys.argv[1]
    outfile = sys.argv[2] if len(sys.argv) == 3 else None
    print "Checking %s" % os.path.abspath(infile)
    rc = process_output(infile, outfile)
    if outfile:
        print "Output: %s" % os.path.abspath(outfile)
    # POSIX exit codes are limited to 0-255; cap the return code.
    if rc > 255:
        rc = 255
    sys.exit(rc)
| Python |
#!/usr/bin/env python
import sys
import os
import tempfile
# Shared 'database' file location used by the command line entry points below.
DATABASE_FILE = os.path.join(tempfile.gettempdir(), 'robotframework-quickstart-db.txt')
class DataBase(object):
    def __init__(self, dbfile):
        """This class reads and writes user data in a 'database'.

        dbfile can be either a string or an already opened file object. In
        the former case, dbfile is considered to be a path. If a file object
        is given, it must be opened in a mode that allows both reading and
        writing.
        """
        self._dbfile, self._users = self._read_users(dbfile)
    def _read_users(self, dbfile):
        # Returns an open file handle and a dict mapping username -> User.
        users = {}
        if isinstance(dbfile, basestring):
            if not os.path.isfile(dbfile):
                # No database yet: create an empty file.
                return open(dbfile, 'w'), users
            else:
                dbfile = open(dbfile, 'r+')
        # Rows are tab-separated: username, password, status.
        for row in dbfile.read().splitlines():
            user = User(*row.split('\t'))
            users[user.username] = user
        return dbfile, users
    def create_user(self, username, password):
        # Returns 'SUCCESS' or an error message if the password is invalid.
        try:
            user = User(username, password)
        except ValueError, err:
            return 'Creating user failed: %s' % err
        self._users[user.username] = user
        return 'SUCCESS'
    def login(self, username, password):
        # A successful login also marks the user as 'Active'.
        if self._is_valid_user(username, password):
            self._users[username].status = 'Active'
            return 'Logged In'
        return 'Access Denied'
    def _is_valid_user(self, username, password):
        return username in self._users and \
            self._users[username].password == password
    def change_password(self, username, old_pwd, new_pwd):
        # ValueError comes either from the explicit raise below or from
        # User password validation rejecting the new password.
        try:
            if not self._is_valid_user(username, old_pwd):
                raise ValueError('Access Denied')
            self._users[username].password = new_pwd
        except ValueError, err:
            return 'Changing password failed: %s' % err
        else:
            return 'SUCCESS'
    def close(self):
        # Rewrite the whole file from the in-memory user dict.
        self._dbfile.seek(0)
        self._dbfile.truncate()
        for user in self._users.values():
            user.serialize(self._dbfile)
        self._dbfile.close()
class User(object):
    """A single user record with a validated password."""

    def __init__(self, username, password, status='Inactive'):
        self.username = username
        self.password = password    # assignment goes through the property below
        self.status = status

    def _get_password(self):
        return self._password

    def _set_password(self, password):
        self._validate_password(password)
        self._password = password

    # Property keeps attribute-style access while enforcing validation.
    password = property(_get_password, _set_password)

    def _validate_password(self, password):
        """Raise ValueError unless the password meets the complexity rules."""
        if not (6 < len(password) < 13):
            raise ValueError('Password must be 7-12 characters long')
        has_lower = any(char.islower() for char in password)
        has_upper = any(char.isupper() for char in password)
        has_digit = any(char.isdigit() for char in password)
        all_valid = all(char.islower() or char.isupper() or char.isdigit()
                        for char in password)
        if not (all_valid and has_lower and has_upper and has_digit):
            raise ValueError('Password must be a combination of lowercase '
                             'and uppercase letters and numbers')

    def serialize(self, dbfile):
        # One tab-separated line per user.
        dbfile.write('%s\t%s\t%s\n' %
                     (self.username, self.password, self.status))
def login(username, password):
    # Command line entry point: prints 'Logged In' or 'Access Denied'.
    db = DataBase(DATABASE_FILE)
    print db.login(username, password)
    db.close()
def create_user(username, password):
    # Command line entry point: prints 'SUCCESS' or the failure reason.
    db = DataBase(DATABASE_FILE)
    print db.create_user(username, password)
    db.close()
def change_password(username, old_pwd, new_pwd):
    # Command line entry point: prints 'SUCCESS' or the failure reason.
    db = DataBase(DATABASE_FILE)
    print db.change_password(username, old_pwd, new_pwd)
    db.close()
def help():
    # NOTE(review): shadows the builtin help(); harmless in this script.
    print 'Usage: %s { create | login | change-password | help }' \
        % os.path.basename(sys.argv[0])
if __name__ == '__main__':
    # Map command line actions to their implementations.
    actions = {'create': create_user, 'login': login,
               'change-password': change_password, 'help': help}
    try:
        action = sys.argv[1]
    except IndexError:
        action = 'help'
    args = sys.argv[2:]
    try:
        actions[action](*args)
    except (KeyError, TypeError):
        # Unknown action or wrong number of arguments: show usage.
        help()
| Python |
#!/usr/bin/env python
"""qs2html.py -- Creates HTML version of Robot Framework Quick Start Guide
Usage: qs2html.py [ cr(eate) | dist | zip ]
create .. Creates the HTML version of the Quick Start Guide.
dist .... Creates the Quick Start Guide and copies it and all its dependencies
under directory named 'robotframework-quickstart-<date>'.
zip ..... Uses 'dist' to create the Quick Start Guide distribution and then
packages it into 'robotframework-quickstart-<date>.zip'.
"""
import sys
import os
import shutil
import time
# Make the user guide build helpers importable from the sibling directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'userguide'))
import ug2html # This also initializes docutils and pygments
def create_quickstart():
    """Generate quickstart.html from quickstart.rst with docutils.

    Returns the (relative) path of the created file.
    """
    from docutils.core import publish_cmdline
    print 'Creating Quick Start Guide ...'
    qsdir = os.path.dirname(os.path.abspath(__file__))
    description = 'Quick Start Guide for Robot Framework'
    # Docutils command line; the last two items are source and target files.
    arguments = '''
--time
--stylesheet-path=../userguide/src/userguide.css
quickstart.rst
quickstart.html
'''.split('\n')[1:-1]
    os.chdir(qsdir)
    publish_cmdline(writer_name='html', description=description, argv=arguments)
    qspath = arguments[-1]
    print os.path.abspath(qspath)
    return qspath
def create_distribution():
qspath = create_quickstart() # we are in doc/quickstart after this
outdir = 'robotframework-quickstart-%d%02d%02d' % time.localtime()[:3]
files = { '': [qspath], 'testlibs': ['LoginLibrary.py'],
'sut': ['login.py', 'test_login.py'] }
print 'Creating distribution directory ...'
if os.path.exists(outdir):
print 'Removing previous distribution'
shutil.rmtree(outdir)
os.mkdir(outdir)
for dirname, files in files.items():
dirpath = os.path.join(outdir, dirname)
if not os.path.exists(dirpath):
print "Creating output directory '%s'" % dirpath
os.mkdir(dirpath)
for name in files:
source = os.path.join(dirname, name)
print "Copying '%s' -> '%s'" % (source, dirpath)
shutil.copy(source, dirpath)
return outdir
def create_zip():
    """Create the distribution directory and package it into a zip file."""
    distribution_dir = create_distribution()
    ug2html.zip_distribution(distribution_dir)
if __name__ == '__main__':
    # 'create'/'cr', 'dist' and 'zip' actions; anything else prints usage.
    actions = { 'create': create_quickstart, 'cr': create_quickstart,
                'dist': create_distribution, 'zip': create_zip }
    try:
        actions[sys.argv[1]](*sys.argv[2:])
    except (KeyError, IndexError, TypeError):
        print __doc__
| Python |
import os
import sys
import subprocess
class LoginLibrary:
    """Robot Framework test library that exercises the login.py application
    by running it in a subprocess and capturing its output.
    """

    def __init__(self):
        sut_dir = os.path.dirname(__file__)
        self._sut_path = os.path.join(sut_dir, '..', 'sut', 'login.py')
        self._status = ''

    def create_user(self, username, password):
        self._run_command('create', username, password)

    def change_password(self, username, old_pwd, new_pwd):
        self._run_command('change-password', username, old_pwd, new_pwd)

    def attempt_to_login_with_credentials(self, username, password):
        self._run_command('login', username, password)

    def status_should_be(self, expected_status):
        # Compares against the output captured from the last command.
        if self._status != expected_status:
            raise AssertionError("Expected status to be '%s' but was '%s'"
                                 % (expected_status, self._status))

    def _run_command(self, command, *args):
        # NOTE(review): the message mentions Jython although sys.executable
        # is whatever interpreter runs this library -- confirm wording.
        if not sys.executable:
            raise RuntimeError("Could not find Jython installation")
        cmdline = [sys.executable, self._sut_path, command] + list(args)
        process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        self._status = process.communicate()[0].strip()
| Python |
def simple_keyword():
    """Log a message"""
    # Anything written to stdout ends up in the Robot Framework log.
    print 'You have used the simplest keyword.'
def greet(name):
    """Logs a friendly greeting to person given as argument"""
    print 'Hello %s!' % name
def multiply_by_two(number):
    """Returns the given number multiplied by two

    The result is always a floating point number.
    This keyword fails if the given `number` cannot be converted to number.
    """
    return 2.0 * float(number)
def numbers_should_be_equal(first, second):
    """Fails if the given arguments are not equal after float conversion."""
    # The '*DEBUG*' prefix sets the log level of the logged message.
    print '*DEBUG* Got arguments %s and %s' % (first, second)
    if float(first) != float(second):
        raise AssertionError('Given numbers are unequal!')
| Python |
#!/usr/bin/env python
"""pt2html.py -- Creates HTML version of Python Tutorial
Usage: pt2html.py
"""
import sys
import os
import shutil
import time
# Make the user guide build helpers importable from the sibling directory.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'userguide'))
import ug2html # This also initializes docutils and pygments
def create_tutorial():
    """Generate PythonTutorial.html from PythonTutorial.rst with docutils.

    Returns the (relative) path of the created file.
    """
    from docutils.core import publish_cmdline
    print 'Creating Python Tutorial ...'
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    description = 'Python Tutorial for Robot Framework Library Developers'
    # Docutils command line; the last two items are source and target files.
    arguments = '''
--time
--stylesheet-path=../userguide/src/userguide.css
PythonTutorial.rst
PythonTutorial.html
'''.split('\n')[1:-1]
    publish_cmdline(writer_name='html', description=description, argv=arguments)
    path = arguments[-1]
    print os.path.abspath(path)
    return path
# Script entry point.
if __name__ == '__main__':
    create_tutorial()
| Python |
#!/usr/bin/env python
"""Usage: lib2html.py [ library | all ]
Libraries:
BuiltIn (bu)
Collections (co)
Dialogs (di)
OperatingSystem (op)
Process (pr)
Screenshot (sc)
String (st)
Telnet (te)
XML (xm)
"""
import sys
import os
import re
# Repository root, resolved relative to this file's location.
ROOT = os.path.normpath(os.path.join(os.path.abspath(__file__),'..','..','..'))
sys.path.insert(0, os.path.join(ROOT,'src'))
from robot.libdoc import libdoc
# Map both full names (lower-cased) and short aliases to the canonical
# library name. The mapping is parsed from the usage text above.
LIBRARIES = {}
for line in __doc__.splitlines():
    # Raw string: '\s' and '\w' are not valid Python string escapes and
    # only worked by accident in the original non-raw literal.
    res = re.search(r'\s+(\w+) \((\w+)\)', line)
    if res:
        name, alias = res.groups()
        LIBRARIES[name.lower()] = LIBRARIES[alias] = name
def create_libdoc(name):
    """Generate HTML documentation for one standard library with libdoc."""
    source = os.path.join(ROOT, 'src', 'robot', 'libraries', name + '.py')
    target = os.path.join(ROOT, 'doc', 'libraries', name + '.html')
    libdoc(source, target)
if __name__ == '__main__':
    try:
        name = sys.argv[1].lower()
        if name == 'all':
            # Generate docs for every library; set() removes alias duplicates.
            for name in sorted(set(LIBRARIES.values())):
                create_libdoc(name)
        else:
            create_libdoc(LIBRARIES[name])
    except (IndexError, KeyError):
        # No argument or unknown library name: show usage.
        print __doc__
| Python |
#!/usr/bin/env python
"""ug2html.py -- Creates HTML version of Robot Framework User Guide
Usage: ug2html.py [ cr(eate) | dist | zip ]
create .. Creates the user guide so that it has relative links to images,
library docs, etc. This version is stored in the version control
and distributed with the source distribution.
dist .... Creates the user guide under 'robotframework-userguide-<version>'
directory and also copies all needed images and other link targets
there. The created output directory can thus be distributed
independently.
zip ..... Uses 'dist' to create a stand-alone distribution and then packages
it into 'robotframework-userguide-<version>.zip'
Version number to use is got automatically from 'src/robot/version.py' file
created by 'package.py'.
"""
import os
import sys
import shutil
# First part of this file is Pygments configuration and actual
# documentation generation follows it.
#
#
# Pygments configuration
# ----------------------
#
# This code is from 'external/rst-directive.py' file included in Pygments 0.9
# distribution. For more details see http://pygments.org/docs/rstdirective/
#
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.4 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter used when no variant option is given
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use;
# each name also becomes a directive flag option (see registration below).
VARIANTS = {
    # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    # Docutils directive handler rendering a code block with Pygments.
    # Signature follows the (old-style) docutils directive function API.
    try:
        lexer = get_lexer_by_name(arguments[0])
    except ValueError:
        # no lexer found - use the text one instead of an exception
        lexer = TextLexer()
    # take an arbitrary option if more than one is given
    formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
    # possibility to read the content from an external file
    filtered = [ line for line in content if line.strip() ]
    if len(filtered) == 1:
        path = filtered[0].replace('/', os.sep)
        if os.path.isfile(path):
            content = open(path).read().splitlines()
    parsed = highlight(u'\n'.join(content), lexer, formatter)
    return [nodes.raw('', parsed, format='html')]
# Register 'sourcecode' as a reST directive: one required argument (the
# lexer name), content allowed, and a flag option per formatter variant.
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
#
# Create the user guide using docutils
#
# This code is based on rst2html.py distributed with docutils
#
# Set the locale for docutils output formatting; failures (e.g. an
# unsupported locale on this system) are safe to ignore.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
def create_userguide():
    """Generate RobotFrameworkUserGuide.html with docutils.

    Returns the absolute path of the created file and the version string
    (with '-' separator) for use in distribution directory names.
    """
    from docutils.core import publish_cmdline
    print 'Creating user guide ...'
    ugdir = os.path.dirname(os.path.abspath(__file__))
    sys.path.insert(0, os.path.join(ugdir, '..', '..', 'src', 'robot'))
    from version import get_version
    print 'Version:', get_version()
    # Temporary reST substitution file so |version| works in the source;
    # removed again after the HTML has been generated.
    vfile = open(os.path.join(ugdir, 'src', 'version.rst'), 'w')
    vfile.write('.. |version| replace:: %s\n' % get_version())
    vfile.close()
    description = 'HTML generator for Robot Framework User Guide.'
    arguments = ['--time',
                 '--stylesheet-path', ['src/userguide.css'],
                 'src/RobotFrameworkUserGuide.rst',
                 'RobotFrameworkUserGuide.html']
    os.chdir(ugdir)
    publish_cmdline(writer_name='html', description=description, argv=arguments)
    os.unlink(vfile.name)
    ugpath = os.path.abspath(arguments[-1])
    print ugpath
    return ugpath, get_version(sep='-')
#
# Create user guide distribution directory
#
def create_distribution():
    """Create a stand-alone user guide distribution directory.

    All relative link targets (tools, templates, library docs, images) are
    copied into the output directory and the links in the generated HTML
    are rewritten to point to the copies. Returns the directory name.
    """
    import re
    from urlparse import urlparse
    ugpath, version = create_userguide() # we are in doc/userguide after this
    outdir = 'robotframework-userguide-%s' % version
    tools = os.path.join(outdir, 'tools')
    templates = os.path.join(outdir, 'templates')
    libraries = os.path.join(outdir, 'libraries')
    images = os.path.join(outdir, 'images')
    print 'Creating distribution directory ...'
    if os.path.exists(outdir):
        print 'Removing previous user guide distribution'
        shutil.rmtree(outdir)
    for dirname in [outdir, tools, templates, libraries, images]:
        print "Creating output directory '%s'" % dirname
        os.mkdir(dirname)
    def replace_links(res):
        # Invoked by link_regexp.sub below; rewrites one href/src attribute
        # and copies the link target into the distribution directory.
        if not res.group(5):
            return res.group(0)
        scheme, _, path, _, _, fragment = urlparse(res.group(5))
        # External links and pure fragment links are kept as they are.
        if scheme or (fragment and not path):
            return res.group(0)
        replaced_link = '%s %s="%%s/%s"' % (res.group(1), res.group(4),
                                            os.path.basename(path))
        if path.startswith('../../tools'):
            copy(path, tools)
            copy_tool_images(path)
            replaced_link = replaced_link % 'tools'
        elif path.startswith('../../templates'):
            copy(path, templates)
            replaced_link = replaced_link % 'templates'
        elif path.startswith('../libraries'):
            copy(path, libraries)
            replaced_link = replaced_link % 'libraries'
        elif path.startswith('src/'):
            copy(path, images)
            replaced_link = replaced_link % 'images'
        else:
            raise ValueError('Invalid link target: %s (context: %s)'
                             % (path, res.group(0)))
        print "Modified link '%s' -> '%s'" % (res.group(0), replaced_link)
        return replaced_link
    def copy(source, dest):
        print "Copying '%s' -> '%s'" % (source, dest)
        shutil.copy(source, dest)
    def copy_tool_images(path):
        # Copy images referenced by a tool's reST documentation.
        indir = os.path.dirname(path)
        for line in open(os.path.splitext(path)[0]+'.txt').readlines():
            if line.startswith('.. figure::'):
                copy(os.path.join(indir, line.strip().split()[-1]), tools)
    # Matches <a ...> and <img ...> tags and captures href/src attributes.
    link_regexp = re.compile('''
(<(a|img)\s+.*?)
(\s+(href|src)="(.*?)"|>)
''', re.VERBOSE | re.DOTALL | re.IGNORECASE)
    content = open(ugpath).read()
    content = link_regexp.sub(replace_links, content)
    outfile = open(os.path.join(outdir, os.path.basename(ugpath)), 'wb')
    outfile.write(content)
    outfile.close()
    print os.path.abspath(outfile.name)
    return outdir
#
# Create a zip distribution package
#
def create_zip():
    """Create the stand-alone distribution and compress it into a zip file."""
    zip_distribution(create_distribution())
def zip_distribution(dirpath):
    """Generic zipper. Used also by qs2html.py

    Packages `dirpath` into '<dirpath>.zip' and then removes the original
    directory.
    """
    from zipfile import ZipFile, ZIP_DEFLATED
    print 'Creating zip package ...'
    zippath = os.path.normpath(dirpath) + '.zip'
    zipfile = ZipFile(zippath, 'w', compression=ZIP_DEFLATED)
    for root, _, files in os.walk(dirpath):
        for name in files:
            path = os.path.join(root, name)
            print "Adding '%s'" % path
            zipfile.write(path)
    zipfile.close()
    print 'Removing distribution directory', dirpath
    shutil.rmtree(dirpath)
    print os.path.abspath(zippath)
if __name__ == '__main__':
    # 'create'/'cr', 'dist' and 'zip' actions; anything else prints usage.
    actions = { 'create': create_userguide, 'cr': create_userguide,
                'dist': create_distribution, 'zip': create_zip }
    try:
        actions[sys.argv[1]](*sys.argv[2:])
    except (KeyError, IndexError, TypeError):
        print __doc__
| Python |
class LoggingLibrary:
"""Library for logging messages.
= Table of contents =
- `Usage`
- `Valid log levels`
- `Examples`
- `Importing`
- `Shortcuts`
- `Keywords`
= Usage =
This library has several keyword, for example `Log Message`, for logging
messages. In reality the library is used only for _libdoc_ demonstration
purposes.
= Valid log levels =
Valid log levels are `INFO`, `DEBUG`, and `TRACE`. The default log level
can be set during `importing`.
= Examples =
Notice how keywords are linked from examples.
| `Log Message` | My message | | |
| `Log Two Messages` | My message | Second message | level=DEBUG |
| `Log Messages` | First message | Second message | Third message |
"""
ROBOT_LIBRARY_VERSION = '0.1'
def __init__(self, default_level='INFO'):
"""The default log level can be given at library import time.
See `Valid log levels` section for information about available log
levels.
Examples:
| *Setting* | *Value* | *Value* | *Comment* |
| Library | LoggingLibrary | | # Use default level (`INFO`) |
| Library | LoggingLibrary | DEBUG | # Use the given level |
"""
self.default_level = self._verify_level(default_level)
def _verify_level(self, level):
level = level.upper()
if level not in ['INFO', 'DEBUG', 'TRACE']:
raise RuntimeError("Invalid log level'%s'. Valid levels are "
"'INFO', 'DEBUG', and 'TRACE'")
return level
def log_message(self, message, level=None):
"""Writes given message to the log file using the specified log level.
If no level is given, the default level given during `library
importing` is used.
"""
level = self._verify_level(level) if level else self.default_level
print "*%s* %s" % (level, message)
def log_two_messages(self, message1, message2, level=None):
"""Writes given messages to the log file using the specified log level.
See `Log Message` keyword for more information.
"""
self.log_message(message1, level)
self.log_message(message2, level)
def log_messages(self, *messages):
"""Logs given messages using the log level set during `importing`.
See also `Log Message` and `Log Two Messages`.
"""
for msg in messages:
self.log_message(msg)
| Python |
#!/usr/bin/env python
import os
import shutil
from optparse import OptionParser
from os.path import abspath, dirname, join
from subprocess import call
from sys import exit
class GenerateApiDocs(object):
BUILD_DIR = abspath(dirname(__file__))
AUTODOC_DIR = join(BUILD_DIR, 'autodoc')
ROOT = join(BUILD_DIR, '..', '..')
ROBOT_DIR = join(ROOT, 'src', 'robot')
JAVA_SRC = join(ROOT, 'src', 'java')
JAVA_TARGET = join(BUILD_DIR, '_static', 'javadoc')
def __init__(self):
try:
import sphinx as _
except ImportError:
exit('Generating API docs requires Sphinx')
self.options = GeneratorOptions()
def run(self):
self.create_autodoc()
if self.options.javadoc:
self.create_javadoc()
orig_dir = abspath(os.curdir)
os.chdir(self.BUILD_DIR)
rc = call(['make', 'html'], shell=os.name == 'nt')
os.chdir(orig_dir)
print abspath(join(self.BUILD_DIR, '_build', 'html', 'index.html'))
exit(rc)
def create_autodoc(self):
self._clean_directory(self.AUTODOC_DIR)
print 'Genearting autodoc'
call(['sphinx-apidoc', '--output-dir', self.AUTODOC_DIR, '--force',
'--no-toc', '--maxdepth', '2', self.ROBOT_DIR])
def create_javadoc(self):
self._clean_directory(self.JAVA_TARGET)
print 'Generating javadoc'
call(['javadoc', '-sourcepath', self.JAVA_SRC, '-d', self.JAVA_TARGET,
'-notimestamp', 'org.robotframework'])
def _clean_directory(self, dirname):
if os.path.exists(dirname):
print 'Cleaning', dirname
shutil.rmtree(dirname)
class GeneratorOptions():
    # Command line options for generate.py, parsed with optparse.
    # NOTE(review): the usage text below has an unbalanced backtick in
    # "`src/python and `src/java`" and says "is create" -- confirm wording
    # before fixing this user-visible string.
    usage = '''
generate.py [options]
This script creates API documentation from both Python and Java source code
included in `src/python and `src/java`, respectively. Python autodocs are
created in `doc/api/autodoc` and Javadocs in `doc/api/_static/javadoc`.
API documentation entry point is create using Sphinx's `make html`.
Sphinx, sphinx-apidoc and javadoc commands need to be in $PATH.
'''
    def __init__(self):
        self._parser = OptionParser(self.usage)
        self._add_options()
        self._options, _ = self._parser.parse_args()
        # If --javadoc was not given, ask the user interactively.
        if not self._options._javadoc:
            self._prompt_for_generation('javadoc')
    @property
    def javadoc(self):
        # True if Javadoc generation was requested via option or prompt.
        return self._options._javadoc
    def _add_options(self):
        self._parser.add_option('-j', '--javadoc',
                                action='store_true',
                                dest='_javadoc',
                                help='Generates Javadoc'
                                )
    def _prompt_for_generation(self, attr_name):
        selection = raw_input('Generate also %s? '
                              '[Y/N] (N by default) > ' % attr_name.title())
        if len(selection) > 0 and selection[0].lower() == 'y':
            setattr(self._options, '_%s' % attr_name, True)
# Script entry point.
if __name__ == '__main__':
    GenerateApiDocs().run()
| Python |
#!/usr/bin/env python
"""Usage: check_test_times.py seconds inpath [outpath]
Reads test execution result from an output XML file and checks that no test
took longer than given amount of seconds to execute.
Optional `outpath` specifies where to write processed results. If not given,
results are written over the original file.
"""
import sys
from robot.api import ExecutionResult, ResultVisitor
class ExecutionTimeChecker(ResultVisitor):
    """Visitor that fails passing tests that ran longer than allowed."""

    def __init__(self, max_seconds):
        # Elapsed times in the result model are milliseconds.
        self.max_milliseconds = 1000 * max_seconds

    def visit_test(self, test):
        # Only passing tests are flipped; failures keep their own message.
        if test.status != 'PASS':
            return
        if test.elapsedtime > self.max_milliseconds:
            test.status = 'FAIL'
            test.message = 'Test execution took too long.'
def check_tests(seconds, inpath, outpath=None):
    """Check `inpath` for too slow tests and save the processed result.

    Results are written to `outpath`, or over `inpath` if not given.
    """
    checker = ExecutionTimeChecker(float(seconds))
    result = ExecutionResult(inpath)
    result.visit(checker)
    result.save(outpath)
if __name__ == '__main__':
    try:
        check_tests(*sys.argv[1:])
    except TypeError:
        # Wrong number of arguments: show usage.
        print __doc__
| Python |
class CheckMultipleItemsLibrary:

    def items_should_not_contain(self, value, *items):
        """Checks that none of the given 'items' contains the given 'value'."""
        offending = [item for item in items if value in item]
        if offending:
            raise AssertionError("Items '%s' contains '%s'"
                                 % (', '.join(offending), value))
| Python |
"""Robot Framework test library example that calls C code.
This example uses Python's standard `ctypes` module, which requires
that the C code is compiled into a shared library.
It is also possible to execute this file from the command line
to test the C code manually.
"""
from ctypes import CDLL, c_char_p
# Load the shared C library. On Windows we'd use '.dll' instead of '.so'.
LIBRARY = CDLL('./liblogin.so')
def check_user(username, password):
    """Validates user name and password using imported shared C library."""
    valid = LIBRARY.validate_user(c_char_p(username), c_char_p(password))
    if not valid:
        raise AssertionError('Wrong username/password combination')
if __name__ == '__main__':
    import sys
    try:
        check_user(*sys.argv[1:])
    except TypeError:
        # Wrong number of arguments: show usage.
        print 'Usage: %s username password' % sys.argv[0]
    except AssertionError, err:
        print err
    else:
        print 'Valid password'
| Python |
#!/usr/bin/env python
"""Custom Robot Framework installation script.
Usage: python install.py [ in(stall) | un(install) | re(install) ]
Using `python install.py install` simply runs `python setup.py install`
internally. You need to use `setup.py` directly, if you want to alter the
default installation somehow.
To install with Jython or IronPython instead of Python, replace `python`
with `jython` or `ipy`, respectively.
For more information about installation in general see
http://code.google.com/p/robotframework/wiki/Installation
"""
import glob
import os
import shutil
import sys
def install():
    """Install Robot Framework by running 'setup.py install'."""
    # Remove a possibly stale build directory to force a clean build.
    _remove(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'build'))
    print 'Installing Robot Framework...'
    setup = os.path.join(os.path.dirname(sys.argv[0]), 'setup.py')
    rc = os.system('"%s" %s install' % (sys.executable, setup))
    if rc != 0:
        print 'Installation failed.'
        sys.exit(rc)
    print 'Installation was successful.'
def uninstall():
    """Remove an existing Robot Framework installation."""
    print 'Uninstalling Robot Framework...'
    try:
        instdir = _get_installation_directory()
    except Exception:
        print 'Robot Framework is not installed or the installation is corrupted.'
        sys.exit(1)
    _remove(instdir)
    # Non-egg installs keep '.egg-info' metadata next to the package dir.
    if not 'robotframework' in instdir:
        _remove_egg_info(instdir)
    _remove_runners()
    print 'Uninstallation was successful.'
def reinstall():
    # Equivalent to running 'uninstall' followed by 'install'.
    uninstall()
    install()
def _get_installation_directory():
    """Return the directory where Robot Framework is installed.

    Raises if the importable `robot` module is not the expected one.
    """
    import robot
    # Ensure we got correct robot module
    if 'Robot' not in robot.pythonpathsetter.__doc__:
        raise TypeError
    robot_dir = os.path.dirname(robot.__file__)
    parent_dir = os.path.dirname(robot_dir)
    # Egg-style installs live inside a 'robotframework-...' directory that
    # should be removed as a whole.
    if 'robotframework' in os.path.basename(parent_dir):
        return parent_dir
    return robot_dir
def _remove_runners():
    """Remove pybot/jybot/rebot start-up scripts from known locations."""
    names = ['pybot', 'jybot', 'ipybot', 'rebot', 'jyrebot', 'ipyrebot']
    if os.sep == '\\':
        # Windows runners are batch files.
        names = ['%s.bat' % name for name in names]
    for name in names:
        if os.name == 'java':
            _remove(os.path.join(sys.prefix, 'bin', name))
        elif os.sep == '\\':
            _remove(os.path.join(sys.prefix, 'Scripts', name))
        else:
            for bindir in ['/bin', '/usr/bin/', '/usr/local/bin']:
                _remove(os.path.join(bindir, name))
def _remove_egg_info(instdir):
    """Remove 'robotframework-*.egg-info' entries next to `instdir`."""
    parent = os.path.dirname(instdir)
    for path in glob.glob(os.path.join(parent, 'robotframework-*.egg-info')):
        _remove(path)
def _remove(path):
    """Remove a file or directory, silently skipping missing paths.

    Failures are reported but never raised.
    """
    if not os.path.exists(path):
        return
    try:
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
    except Exception, err:
        print "Removing '%s' failed: %s" % (path, err)
    else:
        print "Removed '%s'" % path
if __name__ == '__main__':
    # Both full action names and two-letter shortcuts are accepted.
    actions = {'install': install, 'in': install,
               'uninstall': uninstall, 'un': uninstall,
               'reinstall': reinstall, 're': reinstall}
    try:
        actions[sys.argv[1]]()
    except (KeyError, IndexError):
        print __doc__
| Python |
#!/usr/bin/env python
"""rundevel.py -- script to run current code
Usage: [interpreter] rundevel.py [run|rebot] [options] [arguments]
Examples:
./rundevel.py --name Example tests.txt # run with python
./rundevel.py run --name Example tests.txt # same as above
jython rundevel.py --name Example tests.txt # run with jython
./rundevel.py rebot --name Example out.xml # rebot with python
ipy rundevel.py rebot --name Example out.xml # rebot with ipy
"""
from os.path import abspath, dirname, exists, join
from os import mkdir, putenv
import sys
# No arguments at all: show usage and exit.
if len(sys.argv) == 1:
    sys.exit(__doc__)
curdir = dirname(abspath(__file__))
# All outputs (and the syslog) go under <repo>/tmp.
tmp = join(curdir, 'tmp')
if not exists(tmp):
    mkdir(tmp)
# Make the development version of Robot Framework importable.
sys.path.insert(0, join(curdir, 'src'))
from robot import run_cli, rebot_cli
if sys.argv[1] == 'rebot':
    runner = rebot_cli
    args = sys.argv[2:]
else:
    runner = run_cli
    args = ['--pythonpath', join(curdir, 'atest', 'testresources', 'testlibs'),
            '--pythonpath', tmp, '--loglevel', 'DEBUG']
    # An explicit 'run' sub-command is optional.
    args += sys.argv[2:] if sys.argv[1] == 'run' else sys.argv[1:]
putenv('ROBOT_SYSLOG_FILE', join(tmp, 'syslog.txt'))
runner(['--outputdir', tmp] + args)
| Python |
from robot import run as run_robot
import cProfile
import pstats
# Profile a full test run and print the collected statistics.
filename = 'robot.profile'
# NOTE(review): hard-coded, user-specific test data path -- update or
# parameterize before running on another machine.
cProfile.run('run_robot("/home/husa/workspace/robotframework/atest/testdata/misc/")', filename)
p = pstats.Stats(filename)
p.strip_dirs().sort_stats(-1).print_stats()
| Python |
#!/usr/bin/env python
"""Profiler for Robot Framework `run` and `rebot`.
Usage: profiler.py run|rebot [options] arguments
"""
import cProfile
import pstats
import os
from os.path import abspath, dirname, join
import sys
import tempfile
# Repository root; make the in-repository 'robot' package importable.
rootdir = dirname(dirname(abspath(__file__)))
sys.path.insert(0, join(rootdir, 'src'))
from robot.run import run_cli
from robot.rebot import rebot_cli
def profile(profiled):
    """Profile the given statement and print the 50 most expensive calls.

    `profiled` is a statement string executed by `cProfile.run` in this
    module's namespace (e.g. 'run_cli(sys.argv[2:])'). The intermediate
    results file is always removed afterwards.
    """
    # mkstemp creates the file securely; the original used the deprecated,
    # race-prone tempfile.mktemp.
    handle, results = tempfile.mkstemp(suffix='.out', prefix='pybot-profile',
                                       dir=join(rootdir, 'tmp'))
    os.close(handle)
    try:
        cProfile.run(profiled, results)
        stats = pstats.Stats(results)
        stats.sort_stats('cumulative').print_stats(50)
    finally:
        os.remove(results)
if __name__ == '__main__':
    try:
        # The first argument selects what to profile; the rest are passed
        # to the selected entry point via sys.argv inside the statement.
        profiled = {'run': 'run_cli(sys.argv[2:])',
                    'rebot': 'rebot_cli(sys.argv[2:])'}[sys.argv[1]]
    except (IndexError, KeyError):
        sys.exit(__doc__)
    profile(profiled)
| Python |
# Robot Framework variable file: exposes ${VALUE_FROM_VAR_FILE} to tests.
VALUE_FROM_VAR_FILE='Expected Value'
| Python |
def this_keyword_is_in_funnylib():
    # Minimal example keyword: logs 'jee' via stdout.
    print 'jee'
| Python |
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import subprocess
import time
from random import randint
import os
import re
import sys
from robot.libraries import BuiltIn
from robot.utils import html_escape, ArgumentParser
from robot.version import get_version
class Parallel(object):
    """
    Library for executing tests in parallel from inside of a robot test case.
    Tests are executed in subprocesses.
    You can add arguments to all parallel test runs from `library importing`,
    for a set of parallel tests with `Add Arguments For Parallel Tests` and
    for an individual parallel test by passing the arguments in `Start Parallel Test`.
    The following command line arguments (also from argument files) are automatically
    passed to parallel tests:
    --loglevel, --runmode, --pythonpath, --variable, --variablefile
    NOTE(review): `_get_arguments` below only forwards --loglevel, --runmode,
    --variable and --variablefile; --pythonpath is listed above but is not
    currently forwarded -- confirm which behavior is intended.
    Example:
    | *Settings* |
    | Library | Parallel | pybot |
    | *Test Cases* |
    | Runner |
    | | Run Parallel Tests | Hello | World |
    | Hello |
    | | [Tags] | parallel |
    | | Log | Hello ${WORLD} |
    | World |
    | | [Tags] | parallel |
    | | Log | ${HELLO} World |
    `pybot --exclude parallel --variable HELLO:Hello --variable WORLD:World .`
    """
    def __init__(self, runner_script, *arguments):
        """
        `runner_script` is pybot or jybot or a custom script.
        `arguments` are default arguments given to every test execution.
        Example:
        | Library | Parallel | pybot | --variable | variable:value | --loglevel | DEBUG |
        """
        self._script = runner_script
        self._arguments = self._get_arguments(arguments)
        self._processes = []
        self._data_source = None
    def _get_arguments(self, additional_arguments):
        # Collect relevant options from this (parent) run's own command line
        # so that every parallel child execution inherits them.
        options,_ = ArgumentParser(_get_cmd_arguments()).parse_args(sys.argv[1:])
        args = []
        for arg in ['loglevel', 'runmode', 'variable', 'variablefile']:
            args += self._get_type_arguments(options, arg)
        args += list(additional_arguments)
        return args
    def _get_type_arguments(self, options, key):
        # Turn a parsed option value (scalar or list) back into repeated
        # '--key value' command line arguments.
        value = options[key]
        args = []
        if value is not None:
            if not isinstance(value, list):
                value = [value]
            for var in value:
                args += ['--%s' % key, var]
        return args
    def add_arguments_for_parallel_tests(self, *arguments):
        """Adds `arguments` to be used when parallel test is started.
        `arguments` is a list of arguments to pass to parallel executions.
        In the following example variable my_var is used in both of the tests
        started with the keyword `Run Parallel Tests`:
        | Add Arguments For Parallel Tests | --variable | my_var:value |
        | Run Parallel Tests | Test | Another Test |
        """
        self._arguments += list(arguments)
    def set_data_source_for_parallel_tests(self, data_source):
        """Sets data source which is used when parallel tests are started.
        `data_source` is path to file which contains the test/tests which are
        started/executed with keywords `Start Parallel Test` or `Run Parallel
        Tests`.
        If tests to be executed are in the same suite and Robot Framework 2.5
        or later is used, there is no need to use this keyword as `data_source`
        can be automatically resolved.
        Examples:
        | Set Data Source For Parallel Tests | ${CURDIR}${/}my_parallel_suite.txt |
        | Start Parallel Test | My Parallel Test |
        | Wait All Parallel Tests |
        """
        self._data_source = data_source
    def start_parallel_test(self, test_name, *arguments):
        """Starts executing test with given `test_name` and `arguments`.
        `arguments` is a list of Robot Framework command line arguments passed to
        the started test execution. It should not include data source. Use
        `Set Data Source For Parallel Tests` keyword for setting the data
        source. Additional arguments can also be set in library import and with
        `Add Arguments For Parallel Tests` keyword.
        Returns a process object that represents this execution.
        Example:
        | Set Data Source For Parallel Tests | MySuite.txt |
        | Start Parallel Test | Test From My Suite |
        | Set Data Source For Parallel Tests | MyFriendsSuite.txt |
        | Start Parallel Test | Test From My Friends Suite |
        | Wait All Parallel Tests |
        """
        # Fall back to the suite the calling test itself comes from.
        if self._data_source is None:
            self._data_source = BuiltIn.BuiltIn().replace_variables('${SUITE_SOURCE}')
        process = _ParaRobo(test_name, self._data_source,
                            self._arguments+list(arguments))
        process.run(self._script)
        self._processes.append(process)
        return process
    def run_parallel_tests(self, *test_names):
        """Executes all given tests parallel and wait those to be ready.
        Arguments can be set with keyword `Add Arguments For Parallel Tests`
        and data source with keyword `Set Data Source For Parallel Tests`.
        Example:
        | Add Arguments For Parallel Tests | --variable | SOME_VARIABLE:someValue |
        | Set Data Source For Parallel Tests | MySuite.txt |
        | Run Parallel Tests | My Parallel Test | My Another Parallel Test |
        When the parallel tests are from different data sources see the example in `Start Parallel Test`.
        """
        processes = []
        for name in test_names:
            processes += [self.start_parallel_test(name)]
        self.wait_parallel_tests(*processes)
    def wait_parallel_tests(self, *processes):
        """Waits given `processes` to be ready and fails if any of the tests failed.
        `Processes` are list of test execution processes returned from keyword
        `Start Parallel Test`.
        Example
        | ${test 1}= | Start Parallel Test | First Test |
        | ${test 2}= | Start Parallel Test | Test That Runs All The Time |
        | Wait Parallel Tests | ${test 1} |
        | ${test 3}= | Start Parallel Test | Third Test |
        | Wait Parallel Tests | ${test 2} | ${test 3} |
        """
        failed = []
        for process in processes:
            # Non-zero exit code from pybot/jybot means failed tests.
            if process.wait() != 0:
                failed += [process.test]
            process.report()
            self._processes.remove(process)
        if failed:
            raise AssertionError("Following tests failed:\n%s" % "\n".join(failed))
    def wait_all_parallel_tests(self):
        """Wait all started test executions to be ready and fails if any of those failed."""
        self.wait_parallel_tests(*self._processes)
    def stop_all_parallel_tests(self):
        """Forcefully stops all the test executions.
        NOTE: Requires Python 2.6 or later.
        """
        for process in self._processes:
            process.stop_test_execution()
        self._processes = []
class _ParaRobo(object):
def __init__(self, test, data_source, arguments):
self.test = test
self._data_source = data_source
self._args = arguments
self._built_in = BuiltIn.BuiltIn()
id = self._create_id()
self._output = 'output_%s.xml' % id
self._log = 'log_%s.html' % id
self._output_dir = self._built_in.replace_variables("${OUTPUT DIR}")
self._monitor_out = os.path.join(self._output_dir, 'monitor_%s.txt' % id)
@property
def _suite_name(self):
name = os.path.splitext(os.path.basename(self._data_source))[0]
name = name.split('__', 1)[-1] # Strip possible prefix
name = name.replace('_', ' ').strip()
if name.islower():
name = name.title()
return name
def _create_id(self):
return "%s_%s" % (randint(0, 10000), time.strftime('%Y%m%d_%H%m%S.')+\
('%03d' % (int(time.time()*1000) % 1000)))
def run(self, script):
self._monitor_file = open(self._monitor_out, 'w')
cmd = [script,
'--outputdir', self._output_dir,
'--output', self._output,
'--report', 'None',
'--log', self._log,
'--monitorcolors', 'off',
'--test', self.test]+\
self._args + [self._data_source]
print "Starting test execution: %s" % " ".join(cmd)
self._process = subprocess.Popen(cmd,
shell=os.sep == '\\',
stdout=self._monitor_file,
stderr=self._monitor_file,
env=self._get_environment_variables())
def _get_environment_variables(self):
environment_variables = os.environ.copy()
if environment_variables.has_key("ROBOT_SYSLOG_FILE"):
del(environment_variables["ROBOT_SYSLOG_FILE"])
return environment_variables
def wait(self):
rc = self._process.wait()
self._monitor_file.close()
return rc
def report(self):
with open(self._monitor_out, 'r') as monitor_file:
monitor_output = monitor_file.read()
try:
os.remove(self._monitor_out)
except:
pass
match = re.search('^Log: (.*)$', monitor_output, re.MULTILINE)
monitor_output = self._replace_stdout_log_message_levels(monitor_output)
monitor_output = html_escape(monitor_output)
if match:
monitor_output = monitor_output.replace(match.group(1), '<a href="%s#test_%s.%s">%s</a>' % (self._log, self._suite_name, self.test, match.group(1)))
monitor_output = self._add_colours(monitor_output)
print "*HTML* %s" % monitor_output
def _replace_stdout_log_message_levels(self, output):
for level in ['TRACE', 'WARN', 'DEBUG', 'INFO', 'HTML']:
output = output.replace('\n*%s*' % level, '\n *%s*' % level)
return output
def _add_colours(self, output):
for name, colour in [("PASS", "pass"), ("FAIL", "fail"), ("ERROR", "fail")]:
output = output.replace(' %s ' % name, ' <span class="%s">%s</span> ' % (colour, name))
return output
def stop_test_execution(self):
try:
self._process.terminate()
except AttributeError:
pass
self.report()
def _get_cmd_arguments():
    """Return the usage text parsed from robot/run.py's module docstring."""
    import robot
    run_py = os.path.join(os.path.dirname(os.path.abspath(robot.__file__)),
                          'run.py')
    with open(run_py, 'r') as source:
        content = source.read()
    return re.search('"""(.+)"""', content, re.DOTALL).group(1)
| Python |
from Queue import Queue
from threading import Event
try:
    from multiprocessing.managers import BaseManager
except ImportError:
    # multiprocessing was added in Python 2.6; provide a placeholder that
    # gives a clear error on first use instead of failing at import time.
    class Python26Required(object):
        def __call__(self, *args):
            # Fixed message: the requirement is 2.6 or newer, not '> 2.6'.
            raise RuntimeError('Requires Python 2.6 or newer')
        def __getattr__(self, name):
            raise RuntimeError('Requires Python 2.6 or newer')
    BaseManager = Python26Required()
class _create_caching_getter(object):
def __init__(self, clazz):
self._clazz = clazz
self._objects = {}
def __call__(self, key):
if key not in self._objects:
self._objects[key] = self._clazz()
return self._objects[key]
class Communicate(object):
    """Library for communication between processes.
    For example this can be used to handle communication between processes of the Parallel robot library.
    Requires Python 2.6
    Example:
    Process 1 test file:
    | *Settings* |
    | Library | Communicate |
    | *Test Cases* |
    | Communicator |
    | | [Setup] | Start Communication Service |
    | | Send Message To | my message queue | hello world! |
    | | ${message}= | Receive Message From | other message queue |
    | | Should Be Equal | ${message} | hello! |
    | | [Teardown] | Stop Communication Service |
    Process 2 test file:
    | *Settings* |
    | Library | Communicate | ${process 1 ip address if on a different machine} |
    | *Test Cases* |
    | Helloer |
    | | ${message}= | Receive Message From | my message queue |
    | | Should Be Equal | ${message} | hello world! |
    | | Send Message To | other message queue | hello! |
    """
    def __init__(self, address='127.0.0.1', port=2187):
        """
        `address` of the communication server.
        `port` of the communication server.
        """
        self._address = address
        self._port = int(port)
        # Shared secret multiprocessing uses to authenticate connections;
        # must match between all communicating processes.
        self._authkey = 'live long and prosper'
        self._queue = None
        self._connected = False
    def _connect(self):
        # Lazily connect to an already running communication service.
        self._create_manager().connect()
        self._connected = True
    def start_communication_service(self):
        """Starts a communication server that will be used to share messages and objects between processes.
        """
        self._create_manager(_create_caching_getter(Queue),
                             _create_caching_getter(Event)).start()
        self._connected = True
    def stop_communication_service(self):
        """Stops a started communication server.
        This ensures that the server and the messages that it has don't influence the next tests.
        To ensure that this keyword really happens place this in the teardown section.
        """
        self._manager.shutdown()
        self._connected = False
    def _create_manager(self, queue_getter=None, event_getter=None):
        # NOTE(review): register() mutates the BaseManager *class* on every
        # call; harmless for repeated identical registrations here, but worth
        # confirming if BaseManager is ever shared with other code.
        BaseManager.register('get_queue', queue_getter)
        BaseManager.register('get_event', event_getter)
        self._manager = BaseManager((self._address, self._port), self._authkey)
        return self._manager
    def send_message_to(self, queue_id, value):
        """Send a message to a message queue.
        `queue_id` is the identifier for the queue.
        `value` is the message. This can be a string, a number or any serializable object.
        Example:
        In one process
        | Send Message To | my queue | hello world! |
        ...
        In another process
        | ${message}= | Receive Message From | my queue |
        | Should Be Equal | ${message} | hello world! |
        """
        self._get_queue(queue_id).put(value)
    def receive_message_from(self, queue_id, timeout=None):
        """Receive and consume a message from a message queue.
        By default this keyword will block until there is a message in the queue.
        `queue_id` is the identifier for the queue.
        `timeout` is the time out in seconds to wait.
        Returns the value from the message queue. Fails if timeout expires.
        Example:
        In one process
        | Send Message To | my queue | hello world! |
        ...
        In another process
        | ${message}= | Receive Message From | my queue |
        | Should Be Equal | ${message} | hello world! |
        """
        timeout = float(timeout) if timeout is not None else None
        return self._get_queue(queue_id).get(timeout=timeout)
    def _get_queue(self, queue_id):
        # Queues are created on demand by the server-side caching getter.
        if not self._connected:
            self._connect()
        return self._manager.get_queue(queue_id)
    def wait_for_event(self, event_id, timeout=None):
        """Waits until event with `event_id` is signaled.
        Fails if optional timeout expires.
        `timeout` is the time out in seconds to wait.
        Example:
        In one process
        | Wait For Event | my event |
        ...
        In another process
        | Signal Event | my event |
        """
        timeout = float(timeout) if timeout is not None else None
        self._get_event(event_id).wait(timeout=timeout)
        #NOTE! If Event#clear is ever exposed it has to be secured (for example r/w lock) that none
        #of the processes can do it while another is at this position.
        if not self._get_event(event_id).isSet():
            raise Exception('Timeout')
    def signal_event(self, event_id):
        """Signals an event.
        If a process is waiting for this event it will stop waiting after the signal.
        `event` is the identifier for the event.
        Example:
        In one process
        | Wait For Event | my event |
        ...
        In another process
        | Signal Event | my event |
        """
        return self._get_event(event_id).set()
    def _get_event(self, event_id):
        # Events are created on demand by the server-side caching getter.
        if not self._connected:
            self._connect()
        return self._manager.get_event(event_id)
| Python |
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
from robot.reporting.outputparser import OutputParser
from robot.result.builders import ResultFromXML
# psutil and objgraph are third-party dependencies required for the memory
# measurements below; fail fast with install instructions if missing.
try:
    import psutil
    import objgraph
except ImportError:
    print """
Please install psutil and objgraph - this script does not work without them.
"""
    raise
def calculate_rebot_model(args):
    """Parse an output.xml and report this process' memory usage afterwards.

    `args` is either [path] or ['--reference', path]; with '--reference' the
    old list-based OutputParser is used instead of the result model.
    Returns the parsed model so it stays alive while memory is measured.
    """
    if args[0] == '--reference':
        xml = OutputParser().parse(args[1])
    else:
        xml = ResultFromXML(args[0])
    p = psutil.Process(os.getpid())
    # NOTE(review): get_memory_info() is the old psutil API (renamed to
    # memory_info() in psutil 2.0) -- confirm the psutil version in use.
    print 'Process memory usage after xml parsing %f M' % (float(p.get_memory_info().rss) / (1024**2))
    print 'Most common types'
    objgraph.show_most_common_types()
    return xml
# Entry point: print usage when no output.xml path is given.
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print """
Simple memory profiler for robot output xml parsing.
Calculates memory usages after result model has been created.
--reference will calculate using list model implementation.
usage:
calculate_rebot_model.py [PATH_TO_OUTPUT_XML]
calculate_rebot_model.py --reference [PATH_TO_OUTPUT_XML]
"""
    else:
        calculate_rebot_model(sys.argv[1:])
| Python |
#!/usr/bin/env python
"""A tool for creating data driven test case for Robot Framework
Usage: testgen.py variablefile template output
This script reads the variable and template files and generates a test suite
which has all test cases found in the template multiplied with all the rows of
the variable file. Suite settings, variables and user keywords from the template
file are serialized as is.
Currently, the input files must be in tsv (tab separated values) format. Also
the output file is written in tsv. The variables file must have a format
demonstrated in the example below, e.g. header row, followed by a row with the
names of the variables, and on the subsequent rows the values for the
variables.
Options:
-h -? --help Print this usage instruction.
Example:
<<template.tsv>>
* Settings *
Documentation Example data driven suite
* Test Cases *
Example Test Keyword ${arg1} ${arg2}
* User Keywords *
Keyword [Arguments] ${val1} ${val2}
Log Many ${val1} ${val2}
<<variables.tsv>>
* Variables *
${arg1} ${arg2}
value1 value2
value11 value22
Given above files, command
python testgen.py variables.tsv template.tsv output.tsv
produces following test suite:
<<output.tsv>>
* Settings *
Documentation Example data driven suite
* Test Cases *
Example Test 1 Keyword value1 value2
Example Test 2 Keyword value11 value22
* User Keywords *
Keyword [Arguments] ${val1} ${val2}
Log Many ${val1} ${val2}
"""
import sys
import os
import csv
from robot.parsing.model import FileSuite
from robot.parsing.tsvreader import TsvReader
from robot.errors import DataError, Information
from robot import utils
class TestGeneratingSuite(FileSuite):
    """FileSuite that serializes itself with every test case multiplied over
    the rows of a variable file (see the module docstring for the formats)."""
    def serialize(self, variables, serializer):
        self._serialize_settings(serializer)
        self._serialize_variables(serializer)
        self._serialize_tests(variables, serializer)
        self._serialize_keywords(serializer)
    def _serialize_settings(self, serializer):
        # Suite settings are passed through from the template as-is.
        serializer.start_settings()
        if self.doc:
            serializer.setting('Documentation', self.doc)
        for name, value in self.metadata.items():
            serializer.setting('Meta: %s' % name, [value])
        for name in ['Default Tags', 'Force Tags', 'Suite Setup',
                     'Suite Teardown', 'Test Setup', 'Test Teardown',
                     ]:
            value = self._get_setting(self, name)
            if value:
                serializer.setting(name, value)
        for imp in self.imports:
            serializer.setting(imp.name, imp._item.value)
        serializer.end_settings()
    def _serialize_variables(self, serializer):
        serializer.start_variables()
        for var in self.variables:
            serializer.variable(var.name, var.value)
        serializer.end_variables()
    def _serialize_tests(self, variables, serializer):
        # Each template test is written once per variable row and named
        # '<original name> <row number>'.
        serializer.start_testcases()
        for test in self.tests:
            orig_name = test.name
            for index, vars in enumerate(variables):
                test.name = '%s %d' % (orig_name, (index+1))
                serializer.start_testcase(test)
                if test.doc:
                    serializer.setting('Documentation', [test.doc])
                for name in ['Setup', 'Tags', 'Timeout']:
                    value = self._get_setting(test, name)
                    if value is not None:
                        serializer.setting(name, value)
                for kw in test.keywords:
                    data = self._replace_variables(vars, [kw.name] + kw.args)
                    serializer.keyword(data)
                if test.teardown is not None:
                    serializer.setting('Teardown', test.teardown)
                serializer.end_testcase()
        serializer.end_testcases()
    def _serialize_keywords(self, serializer):
        # User keywords are passed through from the template as-is.
        serializer.start_keywords()
        for uk in self.user_keywords:
            serializer.start_keyword(uk)
            args = self._format_args(uk.args, uk.defaults, uk.varargs)
            if args:
                serializer.setting('Arguments', args)
            if uk.doc:
                serializer.setting('Documentation', uk.doc)
            if uk.timeout is not None:
                serializer.setting('Timeout', uk.timeout)
            for kw in uk.keywords:
                serializer.keyword([kw.name] + kw.args)
            if uk.return_value:
                serializer.setting('Return Value', uk.return_value)
        serializer.end_keywords()
    def _replace_variables(self, variables, data):
        # Plain-text substitution of variable names with the row's values.
        replaced = []
        for elem in data:
            for key in variables:
                if key in elem:
                    elem = elem.replace(key, variables[key])
            replaced.append(elem)
        return replaced
    def _get_setting(self, item, name):
        # E.g. 'Suite Setup' -> item.suite_setup.
        return getattr(item, name.lower().replace(' ', '_'))
    def _format_args(self, args, defaults, varargs):
        # Re-create '${arg}=default' syntax; defaults apply to the last
        # len(defaults) arguments, varargs goes last.
        parsed = []
        if args:
            parsed.extend(list(args))
        if defaults:
            for i, value in enumerate(defaults):
                index = len(args) - len(defaults) + i
                parsed[index] = parsed[index] + '=' + value
        if varargs:
            parsed.append(varargs)
        return parsed
class VariableIterator(object):
    """Parses a tsv variable file and iterates over its data rows as dicts
    mapping variable name -> value.

    Acts as a parsing listener for `TsvReader` (`start_table`/`add_row`).
    """

    def __init__(self, varfile):
        self._variable_mapping = {}   # variable name -> column index
        self._variables = []          # data rows (lists of values)
        TsvReader().read(varfile, self)

    def __iter__(self):
        # Iterate non-destructively so the same instance can be re-iterated
        # for every test in `TestGeneratingSuite._serialize_tests`. The
        # original implementation popped the rows, leaving nothing for any
        # test after the first one.
        for data in self._variables:
            values = {}
            for key in self._variable_mapping:
                values[key] = data[self._variable_mapping[key]]
            yield values

    def start_table(self, name):
        # TsvReader only feeds us rows from tables accepted here.
        return name.lower().strip() == 'variables'

    def add_row(self, row):
        # The first row names the variables; subsequent rows hold values.
        if not self._variable_mapping:
            for pos in range(len(row)):
                self._variable_mapping[row[pos]] = pos
        else:
            self._variables.append(row)
class AbstractFileWriter(object):
    """Base serializer writing suite data as rows of `cols` columns.

    Subclasses implement `_write_header_row_impl`/`_write_normal_row_impl`
    and are also expected to provide `_get_tc_or_uk_name` (used by
    `setting` and `keyword` below; defined in TsvFileWriter).
    """
    def __init__(self, path, cols):
        # 'wb' because the Python 2 csv module expects a binary file.
        self._output = open(path, 'wb')
        self._cols = cols
        self._tc_name = None
        self._uk_name = None
    def start_settings(self):
        self._write_header_row(['Setting', 'Value'])
    def end_settings(self):
        self._write_empty_row()
    def start_variables(self):
        self._write_header_row(['Variable', 'Value'])
    def end_variables(self):
        self._write_empty_row()
    def start_testcases(self):
        self._write_header_row(['Test Case', 'Action', 'Argument'])
    def end_testcases(self):
        self._write_empty_row()
    def start_testcase(self, testcase):
        self._tc_name = testcase.name
    def end_testcase(self):
        # A still-pending name means the test had no rows; emit it alone.
        if self._tc_name:
            self._write_normal_row([self._tc_name])
            self._tc_name = None
        self._write_empty_row()
    def start_keywords(self):
        self._write_header_row(['Keyword', 'Action', 'Argument'])
    def end_keywords(self):
        self._write_empty_row()
        self._output.close()
    def start_keyword(self, userkeyword):
        self._uk_name = userkeyword.name
    def end_keyword(self):
        # A still-pending name means the keyword had no rows; emit it alone.
        if self._uk_name:
            self._write_normal_row([self._uk_name])
            self._uk_name = None
        self._write_empty_row()
    def setting(self, name, value):
        if self._tc_name is None and self._uk_name is None:
            self._write_normal_row([name] + value)
        else: # TC and UK settings
            row = [self._get_tc_or_uk_name(), '[%s]' % name] + value
            self._write_normal_row(row, indent=1)
    def variable(self, name, value):
        self._write_normal_row([name] + value)
    def keyword(self, keyword):
        name = self._get_tc_or_uk_name()
        # TODO: When adding support for PARALLEL, FOR, etc. need to use
        # different indent when inside indented block
        self._write_normal_row([name] + keyword, indent=1)
    def _write_header_row(self, row):
        # Pad the header by repeating the last title to fill all columns.
        row += [row[-1]] * (self._cols - len(row))
        self._write_header_row_impl(row)
    def _write_normal_row(self, row, indent=0):
        # Rows longer than the column count are split over continuation
        # lines marked with '...' after `indent` empty cells.
        firstrow = True
        while True:
            if firstrow:
                current = row[:self._cols]
                row = row[self._cols:]
                firstrow = False
            else:
                current = ['']*indent + ['...'] + row[:self._cols-indent-1]
                row = row[self._cols-indent-1:]
            self._escape_empty_trailing_cells(current)
            current += [''] * (self._cols - len(current))
            self._write_normal_row_impl(current)
            if not row:
                break
    def _write_empty_row(self):
        self._write_normal_row([])
    def _escape_empty_trailing_cells(self, row):
        # An intentionally empty last cell must be escaped as '\' so it is
        # not dropped when the data is parsed back.
        if len(row) > 0 and row[-1] == '':
            row[-1] = '\\'
    def _get_title(self, path):
        # Suite title from a path; a directory suite's __init__ file gets
        # the directory's name.
        dire, base = os.path.split(path)
        if base.lower() == '__init__.html':
            path = dire
        return utils.printable_name_from_path(path)
    def _write_header_row_impl(self, row):
        raise NotImplementedError
    def _write_normal_row_impl(self, row):
        raise NotImplementedError
class TsvFileWriter(AbstractFileWriter):
    """Concrete writer producing 8-column, UTF-8 encoded tsv output."""
    def __init__(self, path):
        AbstractFileWriter.__init__(self, path, 8)
        self._writer = csv.writer(self._output, dialect='excel-tab')
    def _write_header_row_impl(self, row):
        # Header cells are surrounded by asterisks, e.g. '*Setting*'.
        self._writer.writerow(['*%s*' % cell for cell in row])
    def _write_normal_row_impl(self, row):
        self._writer.writerow([cell.encode('UTF-8') for cell in row])
    def _get_tc_or_uk_name(self):
        # Return the pending test case / user keyword name and clear it so
        # the name is emitted only on the first row of that test/keyword.
        if self._tc_name:
            name = self._tc_name
            self._tc_name = ''
        elif self._uk_name:
            name = self._uk_name
            self._uk_name = ''
        else:
            name = ''
        return name
def generate_suite(cliargs):
    """Generate a data driven test suite file.

    `cliargs` is the command line argument list (without the program name):
    variable file, template file and output file paths, in this order.
    """
    opts, (varfile, templatefile, outfile) = _process_args(cliargs)
    suite = TestGeneratingSuite(templatefile)
    # VariableIterator reads the file fully in its __init__, so the handle
    # can (and should) be closed right after -- the original leaked it.
    varfile_handle = open(varfile)
    try:
        vars = VariableIterator(varfile_handle)
    finally:
        varfile_handle.close()
    # Check for the '.tsv' extension proper; the original endswith('tsv')
    # also accepted names merely ending in the letters 'tsv'.
    if not outfile.endswith('.tsv'):
        outfile = outfile + '.tsv'
    suite.serialize(vars, TsvFileWriter(outfile))
def _process_args(cliargs):
    """Parse command line arguments, exiting with help or an error message
    when parsing fails.

    Returns (options, [varfile, templatefile, outfile]).
    """
    ap = utils.ArgumentParser(__doc__, arg_limits=(3, sys.maxint))
    try:
        opts, paths = ap.parse_args(cliargs, help='help', check_args=True)
    except Information, msg:
        # --help requested; `exit` terminates the process with rc 1.
        exit(msg=str(msg))
    except DataError, err:
        exit(error=str(err))
    return opts, paths
def exit(rc=0, error=None, msg=None):
if error:
print error, "\n\nUse '--help' option to get usage information."
if rc == 0:
rc = 255
if msg:
print msg
rc = 1
sys.exit(rc)
# Script entry point: pass all command line arguments (sans program name).
if __name__ == '__main__':
    generate_suite(sys.argv[1:])
| Python |
#!/usr/bin/env python
"""Packaging script for Robot Framework
Usage: package.py command version_number [release_tag]
Argument 'command' can have one of the following values:
- sdist : create source distribution
- wininst : create Windows installer
- all : create both packages
- version : update only version information in 'src/robot/version.py'
- jar : create stand-alone jar file containing RF and Jython
'version_number' must be a version number in format '2.x(.y)', 'trunk' or
'keep'. With 'keep', version information is not updated.
'release_tag' must be either 'alpha', 'beta', 'rc' or 'final', where all but
the last one can have a number after the name like 'alpha1' or 'rc2'. When
'version_number' is 'trunk', 'release_tag' is automatically assigned to the
current date.
When creating the jar distribution, jython.jar must be placed in 'ext-lib'
directory, under the project root.
This script uses 'setup.py' internally. Distribution packages are created
under 'dist' directory, which is deleted initially. Depending on your system,
you may need to run this script with administrative rights (e.g. with 'sudo').
Examples:
package.py sdist 2.0 final
package.py wininst keep
package.py all 2.1.13 alpha
package.py sdist trunk
package.py version trunk
"""
from __future__ import with_statement
import sys
import os
from os.path import abspath, dirname, exists, join
import shutil
import re
import time
import subprocess
import zipfile
from glob import glob
import urllib
# Project layout paths, all derived from the location of this script.
ROOT_PATH = abspath(dirname(__file__))
DIST_PATH = join(ROOT_PATH, 'dist')
BUILD_PATH = join(ROOT_PATH, 'build')
ROBOT_PATH = join(ROOT_PATH, 'src', 'robot')
JAVA_SRC = join(ROOT_PATH, 'src', 'java', 'org', 'robotframework')
# Jython version bundled into the stand-alone jar (see _get_jython_jar).
JYTHON_VERSION = '2.5.3'
SETUP_PATH = join(ROOT_PATH, 'setup.py')
BITMAP = join(ROOT_PATH, 'robot.bmp')
INSTALL_SCRIPT = 'robot_postinstall.py'
VERSION_PATH = join(ROBOT_PATH, 'version.py')
POM_PATH = join(ROOT_PATH, 'pom.xml')
# Accepted version numbers and release tags ('a1'/'b2'/'rc3' style);
# entries are either exact strings or compiled regular expressions.
VERSIONS = [re.compile('^2\.\d+(\.\d+)?$'), 'trunk', 'keep']
RELEASES = [re.compile('^a\d*$'), re.compile('^b\d*$'),
            re.compile('^rc\d*$'), 'final']
VERSION_CONTENT = """# Automatically generated by 'package.py' script.
import sys
VERSION = '%(version_number)s'
RELEASE = '%(release_tag)s'
TIMESTAMP = '%(timestamp)s'
def get_version(sep=' '):
if RELEASE == 'final':
return VERSION
return VERSION + sep + RELEASE
def get_full_version(who=''):
sys_version = sys.version.split()[0]
version = '%%s %%s (%%s %%s on %%s)' \\
%% (who, get_version(), _get_interpreter(), sys_version, sys.platform)
return version.strip()
def _get_interpreter():
if sys.platform.startswith('java'):
return 'Jython'
if sys.platform == 'cli':
return 'IronPython'
if 'PyPy' in sys.version:
return 'PyPy'
return 'Python'
"""
def sdist(*version_info):
    """Create a source distribution (updates version info first)."""
    version(*version_info)
    _clean()
    _create_sdist()
    _announce()
def wininst(*version_info):
    """Create a Windows installer (final installers only on Windows)."""
    version(*version_info)
    _clean()
    if _verify_platform(*version_info):
        _create_wininst()
    _announce()
def all(*version_info):
    """Create both the source distribution and the Windows installer."""
    # NOTE: intentionally shadows the builtin `all`; the name is part of
    # the command line interface (dispatched via globals() in __main__).
    version(*version_info)
    _clean()
    _create_sdist()
    if _verify_platform(*version_info):
        _create_wininst()
    _announce()
def version(version_number, release_tag=None):
    """Update (or keep) the stored version info and return the resulting
    version string (e.g. '2.8a1').

    `version_number` is '2.x(.y)', 'trunk' or 'keep'; `release_tag` is
    validated against RELEASES unless the version is 'trunk' or 'keep'.
    """
    _verify_version(version_number, VERSIONS)
    if version_number == 'keep':
        _keep_version()
    elif version_number =='trunk':
        # trunk builds are tagged with the current date (YYYYMMDD)
        _update_version(version_number, '%d%02d%02d' % time.localtime()[:3])
    else:
        _update_version(version_number, _verify_version(release_tag, RELEASES))
    sys.path.insert(0, ROBOT_PATH)
    from version import get_version
    return get_version(sep='')
def _verify_version(given, valid):
for item in valid:
if given == item or (hasattr(item, 'search') and item.search(given)):
return given
raise ValueError
def _update_version(version_number, release_tag):
timestamp = '%d%02d%02d-%02d%02d%02d' % time.localtime()[:6]
vfile = open(VERSION_PATH, 'wb')
vfile.write(VERSION_CONTENT % locals())
vfile.close()
# TODO: Fix before next final release
#_update_pom_version(version_number, release_tag)
print 'Updated version to %s %s' % (version_number, release_tag)
def _update_pom_version(version_number, release_tag):
    """Rewrite the <version> element in pom.xml as '<number>-<tag>'."""
    version = '%s-%s' % (version_number, release_tag)
    # Read via a with-statement; the original `open(POM_PATH).read()` left
    # the handle for the garbage collector to close.
    with open(POM_PATH) as pom_file:
        pom_content = pom_file.read()
    with open(POM_PATH, 'w') as pom_file:
        pom_file.write(re.sub('(<version>).*(</version>)',
                              '\\1%s\\2' % version, pom_content))
def _keep_version():
    """Report the currently stored version without changing it."""
    sys.path.insert(0, ROBOT_PATH)
    from version import get_version
    print 'Keeping version %s' % get_version()
def _clean():
    """Remove old build and dist directories."""
    print 'Cleaning up...'
    for path in [DIST_PATH, BUILD_PATH]:
        if exists(path):
            shutil.rmtree(path)
def _verify_platform(version_number, release_tag=None):
    """Return True if a Windows installer may be built on this platform.

    Final installers must be built on Windows itself.
    """
    if release_tag == 'final' and os.sep != '\\':
        print 'Final Windows installers can only be created in Windows.'
        print 'Windows installer was not created.'
        return False
    return True
def _create_sdist():
    """Build the source distribution via setup.py."""
    _create('sdist --force-manifest', 'source distribution')
def _create_wininst():
    """Build the Windows installer via setup.py."""
    _create('bdist_wininst --bitmap %s --install-script %s' % (BITMAP, INSTALL_SCRIPT),
            'Windows installer')
    if os.sep != '\\':
        print 'Warning: Windows installers created on other platforms may not'
        print 'be exactly identical to ones created in Windows.'
def _create(command, name):
    """Run 'python setup.py <command>', exiting with its rc on failure.

    NOTE(review): os.system with unquoted interpolation breaks if
    sys.executable or SETUP_PATH contains spaces -- confirm before
    packaging from such a path.
    """
    print 'Creating %s...' % name
    rc = os.system('%s %s %s' % (sys.executable, SETUP_PATH, command))
    if rc != 0:
        print 'Creating %s failed.' % name
        sys.exit(rc)
    print '%s created successfully.' % name.capitalize()
def _announce():
    """List the artifacts created under the dist directory."""
    print 'Created:'
    for path in os.listdir(DIST_PATH):
        print abspath(join(DIST_PATH, path))
def jar(*version_info):
    """Create a stand-alone jar containing Robot Framework and Jython.

    Requires 'javac' and 'jar' on the command line; missing commands are
    reported instead of raised.
    """
    jython_jar = _get_jython_jar()
    print 'Using Jython %s' % jython_jar
    ver = version(*version_info)
    tmpdir = _create_tmpdir()
    try:
        _compile_java_classes(tmpdir, jython_jar)
        _unzip_jython_jar(tmpdir, jython_jar)
        _copy_robot_files(tmpdir)
        _compile_all_py_files(tmpdir, jython_jar)
        _overwrite_manifest(tmpdir, ver)
        try:
            jar_path = _create_jar_file(tmpdir, ver)
            print 'Created %s based on %s' % (jar_path, jython_jar)
        except subprocess.CalledProcessError:
            print "Unable to create jar! Check for jar command available at the command line."
    except subprocess.CalledProcessError:
        print "Unable to compile java classes! Check for javac command available at the command line."
    # Reached both on success and after the handled errors above.
    shutil.rmtree(tmpdir)
def _get_jython_jar():
    """Return the path to the Jython standalone jar under ext-lib,
    downloading it from Maven Central if it is not there yet."""
    lib_dir = join(ROOT_PATH, 'ext-lib')
    jar_path = join(lib_dir, 'jython-standalone-%s.jar' % JYTHON_VERSION)
    if os.path.exists(jar_path):
        return jar_path
    if not os.path.exists(lib_dir):
        os.mkdir(lib_dir)
    dl_url = "http://search.maven.org/remotecontent?filepath=org/python/jython-standalone/%s/jython-standalone-%s.jar" \
                % (JYTHON_VERSION, JYTHON_VERSION)
    print 'Jython not found, going to download from %s' % dl_url
    urllib.urlretrieve(dl_url, jar_path)
    return jar_path
def _compile_java_classes(tmpdir, jython_jar):
    """Compile the project's Java sources into `tmpdir` against Jython.

    Targets Java 1.5 bytecode; shell=True on Windows so javac is resolved
    through the shell.
    """
    source_files = [join(JAVA_SRC, f)
                    for f in os.listdir(JAVA_SRC) if f.endswith('.java')]
    print 'Compiling %d source files' % len(source_files)
    subprocess.check_call(['javac', '-d', tmpdir, '-target', '1.5', '-source', '1.5',
                           '-cp', jython_jar] + source_files, shell=os.name=='nt')
def _create_tmpdir():
    """Return a freshly created, empty jar working dir under the project root."""
    path = join(ROOT_PATH, 'tmp-jar-dir')
    if exists(path):
        shutil.rmtree(path)
    os.mkdir(path)
    return path
def _unzip_jython_jar(tmpdir, jython_jar):
    """Extract the Jython standalone jar into the jar working directory."""
    zipfile.ZipFile(jython_jar).extractall(tmpdir)
def _copy_robot_files(tmpdir):
    """Copy robot sources under Lib/robot inside the jar layout."""
    # pyc files must be excluded so that compileall works properly.
    todir = join(tmpdir, 'Lib', 'robot')
    shutil.copytree(ROBOT_PATH, todir, ignore=shutil.ignore_patterns('*.pyc'))
    shutil.rmtree(join(todir, 'htmldata', 'testdata'))
def _compile_all_py_files(tmpdir, jython_jar):
    """Byte-compile everything with Jython, then drop robot's .py sources."""
    subprocess.check_call(['java', '-jar', jython_jar, '-m', 'compileall', tmpdir])
    # Jython will not work without its py-files, but robot will
    for root, _, files in os.walk(join(tmpdir,'Lib','robot')):
        for f in files:
            if f.endswith('.py'):
                os.remove(join(root, f))
def _overwrite_manifest(tmpdir, version):
    """Replace Jython's manifest so the jar starts Robot Framework.

    The manifest content must stay at column zero inside the string --
    it is written verbatim into META-INF/MANIFEST.MF.
    """
    with open(join(tmpdir, 'META-INF', 'MANIFEST.MF'), 'w') as mf:
        mf.write('''Manifest-Version: 1.0
Main-Class: org.robotframework.RobotFramework
Specification-Version: 2
Implementation-Version: %s
''' % version)
def _create_jar_file(source, version):
    """Create dist/robotframework-<version>.jar from `source`; return its path."""
    path = join(DIST_PATH, 'robotframework-%s.jar' % version)
    if not exists(DIST_PATH):
        os.mkdir(DIST_PATH)
    _fill_jar(source, path)
    return path
def _fill_jar(sourcedir, jarpath):
    """Pack the prepared directory into a jar ('M': keep our own manifest)."""
    subprocess.check_call(['jar', 'cvfM', jarpath, '.'], cwd=sourcedir,
                          shell=os.name=='nt')
if __name__ == '__main__':
    # Dispatch the first argument to the matching top-level function
    # (sdist/wininst/all/version/jar); any usage error prints the docs.
    try:
        globals()[sys.argv[1]](*sys.argv[2:])
    except (KeyError, IndexError, TypeError, ValueError):
        print __doc__
| Python |
# Minimal py2exe build script: produces a console executable of uploadr.py.
from distutils.core import setup
import py2exe
setup(console=['uploadr.py'])
| Python |
"""
flickr.py
Copyright 2004-2006 James Clarke <james@jamesclarke.info>
Portions Copyright 2007-2008 Joshua Henderson <joshhendo@gmail.com>
THIS SOFTWARE IS SUPPLIED WITHOUT WARRANTY OF ANY KIND, AND MAY BE
COPIED, MODIFIED OR DISTRIBUTED IN ANY WAY, AS LONG AS THIS NOTICE
AND ACKNOWLEDGEMENT OF AUTHORSHIP REMAIN.
2007-12-17
For an up-to-date TODO list, please see:
http://code.google.com/p/flickrpy/wiki/TodoList
For information on how to use the Authentication
module, please see:
http://code.google.com/p/flickrpy/wiki/UserAuthentication
2006-12-19
Applied patches from Berco Beute and Wolfram Kriesing.
2009-05-05
Peter Kolarov - fixed doPost and added Delete to Photo
"""
__author__ = "James Clarke <james@jamesclarke.info>"
__version__ = "$Rev: 42 $"
__date__ = "$Date: 2008-11-19 14:36:57 +0100 (Wed, 19 Nov 2008) $"
__copyright__ = "Copyright: 2004-2006 James Clarke; Portions: 2007-2008 Joshua Henderson"
from urllib import urlencode, urlopen
from xml.dom import minidom
import hashlib
import os
# Flickr REST endpoint.
HOST = 'http://flickr.com'
API = '/services/rest'
# set these here or using flickr.API_KEY in your application
API_KEY = ''
API_SECRET = ''
# NOTE(review): email/password appear unused in this module — confirm.
email = None
password = None
AUTH = False
# The next 2 variables are only important if authentication is used
# this can be set here or using flickr.tokenPath in your application
# this is the path to the folder containing tokenFile (default: token.txt)
tokenPath = ''
# this can be set here or using flickr.tokenFile in your application
# this is the name of the file containing the stored token.
tokenFile = 'token.txt'
class FlickrError(Exception): pass
class Photo(object):
    """Represents a Flickr Photo.

    Attributes listed in __readonly are backed by name-mangled fields
    (_Photo__<name>), are fetched lazily from Flickr on first access
    (see __getattr__/_load_properties) and cannot be assigned by callers.
    """
    __readonly = ['id', 'secret', 'server', 'isfavorite', 'license', 'rotation',
                  'owner', 'dateposted', 'datetaken', 'takengranularity',
                  'title', 'description', 'ispublic', 'isfriend', 'isfamily',
                  'cancomment', 'canaddmeta', 'comments', 'tags', 'permcomment',
                  'permaddmeta']

    #XXX: Hopefully None won't cause problems
    def __init__(self, id, owner=None, dateuploaded=None, \
                 title=None, description=None, ispublic=None, \
                 isfriend=None, isfamily=None, cancomment=None, \
                 canaddmeta=None, comments=None, tags=None, secret=None, \
                 isfavorite=None, server=None, license=None, rotation=None):
        """Must specify id, rest is optional."""
        self.__loaded = False  # set True once _load_properties has run
        self.__cancomment = cancomment
        self.__canaddmeta = canaddmeta
        self.__comments = comments
        self.__dateuploaded = dateuploaded
        self.__description = description
        self.__id = id
        self.__license = license
        self.__isfamily = isfamily
        self.__isfavorite = isfavorite
        self.__isfriend = isfriend
        self.__ispublic = ispublic
        self.__owner = owner
        self.__rotation = rotation
        self.__secret = secret
        self.__server = server
        self.__tags = tags
        self.__title = title
        # Only available after a load from Flickr.
        self.__dateposted = None
        self.__datetaken = None
        self.__takengranularity = None
        self.__permcomment = None
        self.__permaddmeta = None

    def __setattr__(self, key, value):
        # Reject writes to the lazily-loaded Flickr fields.
        if key in self.__class__.__readonly:
            raise AttributeError("The attribute %s is read-only." % key)
        else:
            super(Photo, self).__setattr__(key, value)

    def __getattr__(self, key):
        # Called only when normal attribute lookup fails; the first such
        # access triggers a full fetch from Flickr.
        if not self.__loaded:
            self._load_properties()
        if key in self.__class__.__readonly:
            # Read the name-mangled backing field (_Photo__<key>).
            return super(Photo, self).__getattribute__("_%s__%s" % (self.__class__.__name__, key))
        else:
            return super(Photo, self).__getattribute__(key)

    def _load_properties(self):
        """Loads the properties from Flickr."""
        self.__loaded = True
        method = 'flickr.photos.getInfo'
        data = _doget(method, photo_id=self.id)
        photo = data.rsp.photo
        self.__secret = photo.secret
        self.__server = photo.server
        self.__isfavorite = photo.isfavorite
        self.__license = photo.license
        self.__rotation = photo.rotation
        owner = photo.owner
        self.__owner = User(owner.nsid, username=owner.username,\
                            realname=owner.realname,\
                            location=owner.location)
        self.__title = photo.title.text
        self.__description = photo.description.text
        self.__ispublic = photo.visibility.ispublic
        self.__isfriend = photo.visibility.isfriend
        self.__isfamily = photo.visibility.isfamily
        self.__dateposted = photo.dates.posted
        self.__datetaken = photo.dates.taken
        self.__takengranularity = photo.dates.takengranularity
        self.__cancomment = photo.editability.cancomment
        self.__canaddmeta = photo.editability.canaddmeta
        self.__comments = photo.comments.text
        try:
            self.__permcomment = photo.permissions.permcomment
            self.__permaddmeta = photo.permissions.permaddmeta
        except AttributeError:
            # photo.permissions may be absent — fall back to None.
            self.__permcomment = None
            self.__permaddmeta = None
        #TODO: Implement Notes?
        if hasattr(photo.tags, "tag"):
            # A single tag arrives as a bare element, not a list.
            if isinstance(photo.tags.tag, list):
                self.__tags = [Tag(tag.id, User(tag.author), tag.raw, tag.text) \
                               for tag in photo.tags.tag]
            else:
                tag = photo.tags.tag
                self.__tags = [Tag(tag.id, User(tag.author), tag.raw, tag.text)]

    def __str__(self):
        return '<Flickr Photo %s>' % self.id

    def setTags(self, tags):
        """Set the tags for current photo to list tags.
        (flickr.photos.settags)
        """
        method = 'flickr.photos.setTags'
        tags = uniq(tags)
        _dopost(method, auth=True, photo_id=self.id, tags=tags)
        self._load_properties()  # refresh cached state after the change

    def delete(self):
        """Delete photo from flickr
        (flickr.photos.delete)
        """
        method = 'flickr.photos.delete'
        _dopost(method, auth=True, photo_id=self.id)

    def addTags(self, tags):
        """Adds the list of tags to current tags. (flickr.photos.addtags)
        """
        method = 'flickr.photos.addTags'
        if isinstance(tags, list):
            tags = uniq(tags)
        _dopost(method, auth=True, photo_id=self.id, tags=tags)
        #load properties again
        self._load_properties()

    def removeTag(self, tag):
        """Remove the tag from the photo must be a Tag object.
        (flickr.photos.removeTag)
        """
        method = 'flickr.photos.removeTag'
        tag_id = ''
        try:
            tag_id = tag.id
        except AttributeError:
            raise FlickrError, "Tag object expected"
        _dopost(method, auth=True, photo_id=self.id, tag_id=tag_id)
        self._load_properties()

    def setMeta(self, title=None, description=None):
        """Set metadata for photo. (flickr.photos.setMeta)"""
        method = 'flickr.photos.setMeta'
        # None means "keep the current value".
        if title is None:
            title = self.title
        if description is None:
            description = self.description
        _dopost(method, auth=True, title=title, \
                description=description, photo_id=self.id)
        self.__title = title
        self.__description = description

    def getURL(self, size='Medium', urlType='url'):
        """Retrieves a url for the photo. (flickr.photos.getSizes)

        urlType - 'url' or 'source'
        'url' - flickr page of photo
        'source' - image file
        """
        method = 'flickr.photos.getSizes'
        data = _doget(method, photo_id=self.id)
        for psize in data.rsp.sizes.size:
            if psize.label == size:
                return getattr(psize, urlType)
        raise FlickrError, "No URL found"

    def getSizes(self):
        """
        Get all the available sizes of the current image, and all available
        data about them.
        Returns: A list of dicts with the size data.
        """
        method = 'flickr.photos.getSizes'
        data = _doget(method, photo_id=self.id)
        ret = []
        # The given props are those that we return and the according types, since
        # return width and height as string would make "75">"100" be True, which
        # is just error prone.
        props = {'url':str,'width':int,'height':int,'label':str,'source':str,'text':str}
        for psize in data.rsp.sizes.size:
            d = {}
            for prop,convert_to_type in props.items():
                d[prop] = convert_to_type(getattr(psize, prop))
            ret.append(d)
        return ret

    #def getExif(self):
        #method = 'flickr.photos.getExif'
        #data = _doget(method, photo_id=self.id)
        #ret = []
        #for exif in data.rsp.photo.exif:
        #print exif.label, dir(exif)
        ##ret.append({exif.label:exif.})
        #return ret
        ##raise FlickrError, "No URL found"

    def getLocation(self):
        """
        Return the latitude+longitutde of the picture.
        Returns None if no location given for this pic.
        """
        method = 'flickr.photos.geo.getLocation'
        try:
            data = _doget(method, photo_id=self.id)
        except FlickrError: # Some other error might have occured too!?
            return None
        loc = data.rsp.photo.location
        return [loc.latitude, loc.longitude]

    def getComments(self) :
        """"
        get list of comments for photo
        returns a list of comment objects
        comment text is in return [item].text
        """
        method = "flickr.photos.comments.getList"
        try:
            data = _doget(method, photo_id=self.id)
        except FlickrError: # ???? what errors might there be????
            return None
        return data.rsp.comments
class Photoset(object):
    """A Flickr photoset.

    Holds the set's metadata; the photo count is tracked locally and kept
    in sync by addPhoto/removePhoto/editPhotos.
    """

    def __init__(self, id, title, primary, photos=0, description='', \
                 secret='', server=''):
        self.__id = id
        self.__title = title
        self.__primary = primary       # primary Photo of the set
        self.__description = description
        self.__count = photos          # local photo count (see __len__)
        self.__secret = secret
        self.__server = server

    id = property(lambda self: self.__id)
    title = property(lambda self: self.__title)
    description = property(lambda self: self.__description)
    primary = property(lambda self: self.__primary)

    def __len__(self):
        # Locally tracked count, not re-fetched from Flickr.
        return self.__count

    def __str__(self):
        return '<Flickr Photoset %s>' % self.id

    def getPhotos(self):
        """Returns list of Photos."""
        method = 'flickr.photosets.getPhotos'
        data = _doget(method, photoset_id=self.id)
        photos = data.rsp.photoset.photo
        p = []
        for photo in photos:
            p.append(Photo(photo.id, title=photo.title, secret=photo.secret, \
                           server=photo.server))
        return p

    def editPhotos(self, photos, primary=None):
        """Edit the photos in this set.

        photos - photos for set
        primary - primary photo (if None will used current)
        """
        method = 'flickr.photosets.editPhotos'
        if primary is None:
            primary = self.primary
        ids = [photo.id for photo in photos]
        # The primary photo must always be part of the set.
        if primary.id not in ids:
            ids.append(primary.id)
        _dopost(method, auth=True, photoset_id=self.id,\
                primary_photo_id=primary.id,
                photo_ids=ids)
        self.__count = len(ids)
        return True

    def addPhoto(self, photo):
        """Add a photo to this set.

        photo - the photo
        """
        method = 'flickr.photosets.addPhoto'
        _dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
        self.__count += 1
        return True

    def removePhoto(self, photo):
        """Remove the photo from this set.

        photo - the photo
        """
        method = 'flickr.photosets.removePhoto'
        _dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
        self.__count = self.__count - 1
        return True

    def editMeta(self, title=None, description=None):
        """Set metadata for photo. (flickr.photos.setMeta)"""
        method = 'flickr.photosets.editMeta'
        # None means "keep the current value".
        if title is None:
            title = self.title
        if description is None:
            description = self.description
        _dopost(method, auth=True, title=title, \
                description=description, photoset_id=self.id)
        self.__title = title
        self.__description = description
        return True

    #XXX: Delete isn't handled well as the python object will still exist
    def delete(self):
        """Deletes the photoset.
        """
        method = 'flickr.photosets.delete'
        _dopost(method, auth=True, photoset_id=self.id)
        return True

    def create(cls, photo, title, description=''):
        """Create a new photoset.

        photo - primary photo
        """
        if not isinstance(photo, Photo):
            raise TypeError, "Photo expected"
        method = 'flickr.photosets.create'
        data = _dopost(method, auth=True, title=title,\
                       description=description,\
                       primary_photo_id=photo.id)
        set = Photoset(data.rsp.photoset.id, title, Photo(photo.id),
                       photos=1, description=description)
        return set
    create = classmethod(create)
class User(object):
    """A Flickr user.

    Construction is cheap: any attribute that was not supplied is fetched
    lazily from Flickr on first access (see _general_getattr).
    """

    def __init__(self, id, username=None, isadmin=None, ispro=None, \
                 realname=None, location=None, firstdate=None, count=None):
        """id required, rest optional."""
        self.__loaded = False #so we don't keep loading data
        self.__id = id
        self.__username = username
        self.__isadmin = isadmin
        self.__ispro = ispro
        self.__realname = realname
        self.__location = location
        self.__photos_firstdate = firstdate
        self.__photos_count = count
        # Bug fix: these backing fields were never initialised, so reading
        # the corresponding properties before any load raised AttributeError
        # inside _general_getattr instead of triggering a load from Flickr.
        self.__photos_firstdatetaken = None
        self.__icon_server = None
        self.__icon_url = None

    #property fu
    id = property(lambda self: self._general_getattr('id'))
    username = property(lambda self: self._general_getattr('username'))
    isadmin = property(lambda self: self._general_getattr('isadmin'))
    ispro = property(lambda self: self._general_getattr('ispro'))
    realname = property(lambda self: self._general_getattr('realname'))
    location = property(lambda self: self._general_getattr('location'))
    photos_firstdate = property(lambda self: \
                                self._general_getattr('photos_firstdate'))
    photos_firstdatetaken = property(lambda self: \
                                     self._general_getattr\
                                     ('photos_firstdatetaken'))
    photos_count = property(lambda self: \
                            self._general_getattr('photos_count'))
    icon_server = property(lambda self: self._general_getattr('icon_server'))
    icon_url = property(lambda self: self._general_getattr('icon_url'))

    def _general_getattr(self, var):
        """Return backing field _User__<var>, loading from Flickr first if
        it is still None and no load has happened yet."""
        if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
           and not self.__loaded:
            self._load_properties()
        return getattr(self, "_%s__%s" % (self.__class__.__name__, var))

    def _load_properties(self):
        """Load User properties from Flickr."""
        method = 'flickr.people.getInfo'
        data = _doget(method, user_id=self.__id)
        self.__loaded = True
        person = data.rsp.person
        self.__isadmin = person.isadmin
        self.__ispro = person.ispro
        self.__icon_server = person.iconserver
        if int(person.iconserver) > 0:
            self.__icon_url = 'http://photos%s.flickr.com/buddyicons/%s.jpg' \
                              % (person.iconserver, self.__id)
        else:
            # Flickr's generic placeholder buddy icon.
            self.__icon_url = 'http://www.flickr.com/images/buddyicon.jpg'
        self.__username = person.username.text
        # realname/location/photos elements may be absent — default to u''.
        self.__realname = getattr((getattr(person, 'realname', u'')), 'text', u'')
        self.__location = getattr((getattr(person, 'location', u'')), 'text', u'')
        self.__photos_count = getattr((getattr(getattr(person, 'photos', None), 'count', u'')), 'text', u'')
        if self.__photos_count:
            self.__photos_firstdate = person.photos.firstdate.text
            self.__photos_firstdatetaken = person.photos.firstdatetaken.text
        else:
            self.__photos_firstdate = None
            self.__photos_firstdatetaken = None

    def __str__(self):
        return '<Flickr User %s>' % self.id

    def getPhotosets(self):
        """Returns a list of Photosets."""
        method = 'flickr.photosets.getList'
        data = _doget(method, user_id=self.id)
        sets = []
        if not getattr(data.rsp.photosets, 'photoset', None):
            return sets #N.B. returns an empty set
        # A single photoset arrives as a bare element, not a list.
        if isinstance(data.rsp.photosets.photoset, list):
            for photoset in data.rsp.photosets.photoset:
                sets.append(Photoset(photoset.id, photoset.title.text,\
                                     Photo(photoset.primary),\
                                     secret=photoset.secret, \
                                     server=photoset.server, \
                                     description=photoset.description.text,
                                     photos=photoset.photos))
        else:
            photoset = data.rsp.photosets.photoset
            sets.append(Photoset(photoset.id, photoset.title.text,\
                                 Photo(photoset.primary),\
                                 secret=photoset.secret, \
                                 server=photoset.server, \
                                 description=photoset.description.text,
                                 photos=photoset.photos))
        return sets

    def getPublicFavorites(self, per_page='', page=''):
        return favorites_getPublicList(user_id=self.id, per_page=per_page, \
                                       page=page)

    def getFavorites(self, per_page='', page=''):
        return favorites_getList(user_id=self.id, per_page=per_page, \
                                 page=page)
class Group(object):
    """Flickr Group Pool.

    Metadata not supplied at construction is fetched lazily from Flickr
    on first access (see _general_getattr).
    """

    def __init__(self, id, name=None, members=None, online=None,\
                 privacy=None, chatid=None, chatcount=None):
        self.__loaded = False  # set True once _load_properties has run
        self.__id = id
        self.__name = name
        self.__members = members
        self.__online = online
        self.__privacy = privacy
        self.__chatid = chatid
        self.__chatcount = chatcount
        self.__url = None

    id = property(lambda self: self._general_getattr('id'))
    name = property(lambda self: self._general_getattr('name'))
    members = property(lambda self: self._general_getattr('members'))
    online = property(lambda self: self._general_getattr('online'))
    privacy = property(lambda self: self._general_getattr('privacy'))
    chatid = property(lambda self: self._general_getattr('chatid'))
    chatcount = property(lambda self: self._general_getattr('chatcount'))

    def _general_getattr(self, var):
        """Return backing field _Group__<var>, loading from Flickr first if
        it is still None and no load has happened yet."""
        if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
           and not self.__loaded:
            self._load_properties()
        return getattr(self, "_%s__%s" % (self.__class__.__name__, var))

    def _load_properties(self):
        """Loads the properties from Flickr."""
        method = 'flickr.groups.getInfo'
        data = _doget(method, group_id=self.id)
        self.__loaded = True
        group = data.rsp.group
        # Bug fix: these lines previously read from the undefined name
        # `photo` (copy/paste from Photo._load_properties), so any lazy
        # load of a Group raised NameError at runtime.
        self.__name = group.name.text
        self.__members = group.members.text
        self.__online = group.online.text
        self.__privacy = group.privacy.text
        self.__chatid = group.chatid.text
        self.__chatcount = group.chatcount.text

    def __str__(self):
        return '<Flickr Group %s>' % self.id

    def getPhotos(self, tags='', per_page='', page=''):
        """Get a list of photo objects for this group"""
        method = 'flickr.groups.pools.getPhotos'
        data = _doget(method, group_id=self.id, tags=tags,\
                      per_page=per_page, page=page)
        photos = []
        for photo in data.rsp.photos.photo:
            photos.append(_parse_photo(photo))
        return photos

    def add(self, photo):
        """Adds a Photo to the group"""
        method = 'flickr.groups.pools.add'
        _dopost(method, auth=True, photo_id=photo.id, group_id=self.id)
        return True

    def remove(self, photo):
        """Remove a Photo from the group"""
        method = 'flickr.groups.pools.remove'
        _dopost(method, auth=True, photo_id=photo.id, group_id=self.id)
        return True
class Tag(object):
    """A single tag on a photo: id, author (User), raw and cleaned text."""

    def __init__(self, id, author, raw, text):
        # Store everything as plain public attributes.
        for name, value in (('id', id), ('author', author),
                            ('raw', raw), ('text', text)):
            setattr(self, name, value)

    def __str__(self):
        return '<Flickr Tag %s (%s)>' % (self.id, self.text)
#Flickr API methods
#see api docs http://www.flickr.com/services/api/
#for details of each param
#XXX: Could be Photo.search(cls)
def photos_search(user_id='', auth=False, tags='', tag_mode='', text='',\
                  min_upload_date='', max_upload_date='',\
                  min_taken_date='', max_taken_date='', \
                  license='', per_page='', page='', sort='',\
                  safe_search='', content_type='' ):
    """Returns a list of Photo objects.

    If auth=True then will auth the user. Can see private etc
    """
    method = 'flickr.photos.search'
    data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,\
                  min_upload_date=min_upload_date,\
                  max_upload_date=max_upload_date, \
                  min_taken_date=min_taken_date, \
                  max_taken_date=max_taken_date, \
                  license=license, per_page=per_page,\
                  page=page, sort=sort, safe_search=safe_search, \
                  content_type=content_type, \
                  tag_mode=tag_mode)
    photos = []
    # No 'photo' attribute at all means an empty result page; several hits
    # come back as a list, a single hit as a bare element.
    if data.rsp.photos.__dict__.has_key('photo'):
        if isinstance(data.rsp.photos.photo, list):
            for photo in data.rsp.photos.photo:
                photos.append(_parse_photo(photo))
        else:
            photos = [_parse_photo(data.rsp.photos.photo)]
    return photos
def photos_search_pages(user_id='', auth=False, tags='', tag_mode='', text='',\
                        min_upload_date='', max_upload_date='',\
                        min_taken_date='', max_taken_date='', \
                        license='', per_page='', page='', sort=''):
    """Returns the number of pages for the previous function (photos_search())
    """
    method = 'flickr.photos.search'
    data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,\
                  min_upload_date=min_upload_date,\
                  max_upload_date=max_upload_date, \
                  min_taken_date=min_taken_date, \
                  max_taken_date=max_taken_date, \
                  license=license, per_page=per_page,\
                  page=page, sort=sort)
    # Only the page count of the result envelope is of interest here.
    return data.rsp.photos.pages
#XXX: Could be class method in User
def people_findByEmail(email):
    """Returns User object."""
    data = _doget('flickr.people.findByEmail', find_email=email)
    return User(data.rsp.user.id, username=data.rsp.user.username.text)
def people_findByUsername(username):
    """Returns User object."""
    data = _doget('flickr.people.findByUsername', username=username)
    return User(data.rsp.user.id, username=data.rsp.user.username.text)
#XXX: Should probably be in User as a list User.public
def people_getPublicPhotos(user_id, per_page='', page=''):
    """Returns list of Photo objects."""
    method = 'flickr.people.getPublicPhotos'
    data = _doget(method, user_id=user_id, per_page=per_page, page=page)
    photos = []
    if hasattr(data.rsp.photos, "photo"): # Check if there are photos at all (may be been paging too far).
        if isinstance(data.rsp.photos.photo, list):
            for photo in data.rsp.photos.photo:
                photos.append(_parse_photo(photo))
        else:
            # A single result is a bare element, not a list.
            photos = [_parse_photo(data.rsp.photos.photo)]
    return photos
#XXX: These are also called from User
def favorites_getList(user_id='', per_page='', page=''):
    """Returns list of Photo objects (the authenticated user's favorites).

    Fix: mirrors people_getPublicPhotos — when paging past the last result
    Flickr returns no <photo> elements, so guard with hasattr instead of
    raising AttributeError.
    """
    method = 'flickr.favorites.getList'
    data = _doget(method, auth=True, user_id=user_id, per_page=per_page,
                  page=page)
    photos = []
    if hasattr(data.rsp.photos, "photo"):
        if isinstance(data.rsp.photos.photo, list):
            for photo in data.rsp.photos.photo:
                photos.append(_parse_photo(photo))
        else:
            # A single result is a bare element, not a list.
            photos = [_parse_photo(data.rsp.photos.photo)]
    return photos
def favorites_getPublicList(user_id, per_page='', page=''):
    """Returns list of Photo objects (a user's public favorites).

    Fix: mirrors people_getPublicPhotos — when paging past the last result
    Flickr returns no <photo> elements, so guard with hasattr instead of
    raising AttributeError.
    """
    method = 'flickr.favorites.getPublicList'
    data = _doget(method, auth=False, user_id=user_id, per_page=per_page,
                  page=page)
    photos = []
    if hasattr(data.rsp.photos, "photo"):
        if isinstance(data.rsp.photos.photo, list):
            for photo in data.rsp.photos.photo:
                photos.append(_parse_photo(photo))
        else:
            # A single result is a bare element, not a list.
            photos = [_parse_photo(data.rsp.photos.photo)]
    return photos
def favorites_add(photo_id):
    """Add a photo to the user's favorites."""
    _dopost('flickr.favorites.add', auth=True, photo_id=photo_id)
    return True
def favorites_remove(photo_id):
    """Remove a photo from the user's favorites."""
    _dopost('flickr.favorites.remove', auth=True, photo_id=photo_id)
    return True
def groups_getPublicGroups():
    """Get a list of groups the auth'd user is a member of."""
    method = 'flickr.groups.getPublicGroups'
    data = _doget(method, auth=True)
    groups = []
    # A single group is a bare element, not a list.
    if isinstance(data.rsp.groups.group, list):
        for group in data.rsp.groups.group:
            groups.append(Group(group.id, name=group.name))
    else:
        group = data.rsp.groups.group
        groups = [Group(group.id, name=group.name)]
    return groups
def groups_pools_getGroups():
    """Get a list of groups the auth'd user can post photos to."""
    method = 'flickr.groups.pools.getGroups'
    data = _doget(method, auth=True)
    groups = []
    # A single group is a bare element, not a list.
    if isinstance(data.rsp.groups.group, list):
        for group in data.rsp.groups.group:
            groups.append(Group(group.id, name=group.name, \
                                privacy=group.privacy))
    else:
        group = data.rsp.groups.group
        groups = [Group(group.id, name=group.name, privacy=group.privacy)]
    return groups
def tags_getListUser(user_id=''):
    """Returns a list of tags for the given user (in string format)"""
    # With no user_id, authenticate and ask about the calling user.
    data = _doget('flickr.tags.getListUser', auth=(user_id == ''),
                  user_id=user_id)
    tags = data.rsp.tags.tag
    if not isinstance(tags, list):
        tags = [tags]
    return [t.text for t in tags]
def tags_getListUserPopular(user_id='', count=''):
    """Gets the popular tags for a user in dictionary form tag=>count"""
    # NOTE(review): the `count` parameter is accepted but never passed to
    # the API call below — confirm whether that is intentional.
    method = 'flickr.tags.getListUserPopular'
    # With no user_id, authenticate and ask about the calling user.
    auth = user_id == ''
    data = _doget(method, auth=auth, user_id=user_id)
    result = {}
    # A single tag is a bare element, not a list.
    if isinstance(data.rsp.tags.tag, list):
        for tag in data.rsp.tags.tag:
            result[tag.text] = tag.count
    else:
        result[data.rsp.tags.tag.text] = data.rsp.tags.tag.count
    return result
def tags_getrelated(tag):
    """Gets the related tags for given tag."""
    data = _doget('flickr.tags.getRelated', auth=False, tag=tag)
    related = data.rsp.tags.tag
    if isinstance(related, list):
        return [t.text for t in related]
    return [related.text]
def contacts_getPublicList(user_id):
    """Gets the contacts (Users) for the user_id.

    Returns a list of User objects, or an explanatory string on failure
    (string returns kept for backward compatibility with callers).
    """
    method = 'flickr.contacts.getPublicList'
    data = _doget(method, auth=False, user_id=user_id)
    try:
        contact = data.rsp.contacts.contact
        if isinstance(contact, list):
            return [User(user.nsid, username=user.username)
                    for user in contact]
        # Bug fix: a single contact arrives as a bare element, not a list;
        # the old code had this branch commented out and implicitly
        # returned None in that case.
        return [User(contact.nsid, username=contact.username)]
    except AttributeError:
        return "No users in the list"
    except:
        # XXX: overly broad catch, preserved for compatibility.
        return "Unknown error"
def interestingness():
    """Return the current flickr.interestingness.getList photos."""
    method = 'flickr.interestingness.getList'
    data = _doget(method)
    photos = []
    # A single photo is a bare element, not a list.
    if isinstance(data.rsp.photos.photo , list):
        for photo in data.rsp.photos.photo:
            photos.append(_parse_photo(photo))
    else:
        photos = [_parse_photo(data.rsp.photos.photo)]
    return photos
def test_login():
    """Verify the stored credentials; returns the authenticated User."""
    data = _doget('flickr.test.login', auth=True)
    return User(data.rsp.user.id, username=data.rsp.user.username.text)
def test_echo():
    """Ping the API; returns the response status string."""
    return _doget('flickr.test.echo').rsp.stat
#useful methods
def _doget(method, auth=False, **params):
    """Call a Flickr API method over GET; return the unmarshalled response.

    Raises FlickrError when Flickr reports a non-ok status (see _get_data).
    """
    #uncomment to check you aren't killing the flickr server
    #print "***** do get %s" % method
    params = _prepare_params(params)
    url = '%s%s/?api_key=%s&method=%s&%s%s'% \
          (HOST, API, API_KEY, method, urlencode(params),
           _get_auth_url_suffix(method, auth, params))
    #another useful debug print statement
    #print url
    return _get_data(minidom.parse(urlopen(url)))
def _dopost(method, auth=False, **params):
    """Call a Flickr API method over POST; return the unmarshalled response.

    The auth/signature suffix goes on the URL; api_key, method and the
    remaining params travel in the POST body.
    """
    #uncomment to check you aren't killing the flickr server
    #print "***** do post %s" % method
    params = _prepare_params(params)
    url = '%s%s/?%s' % (HOST, API, _get_auth_url_suffix(method, auth, params))
    payload = 'api_key=%s&method=%s&%s'% \
              (API_KEY, method, urlencode(params))
    #another useful debug print statement
    #print url
    #print payload
    # ret = urlopen(url, payload).read()
    # print ret
    return _get_data(minidom.parse(urlopen(url, payload)))
def _prepare_params(params):
"""Convert lists to strings with ',' between items."""
for (key, value) in params.items():
if isinstance(value, list):
params[key] = ','.join([item for item in value])
return params
def _get_data(xml):
    """Given a bunch of XML back from Flickr, we turn it into a data structure
    we can deal with (after checking for errors)."""
    data = unmarshal(xml)
    # Flickr reports failures inside the envelope, not via HTTP status.
    if not data.rsp.stat == 'ok':
        msg = "ERROR [%s]: %s" % (data.rsp.err.code, data.rsp.err.msg)
        raise FlickrError, msg
    return data
def _get_auth_url_suffix(method, auth, params):
    """Figure out whether we want to authorize, and if so, construct a suitable
    URL suffix to pass to the Flickr API.

    `auth` (or the module-global AUTH) may be True ("authenticate, using
    the stored token") or may itself contain the token string.
    Returns '' when no authentication is requested.
    """
    # Bug fix: the old code tested `if auth or AUTH` first, which made the
    # branches meant to use auth/AUTH as a literal token unreachable —
    # userToken() was always called instead, contrary to the stated intent.
    if auth is True:
        token = userToken()
    elif auth:
        token = auth
    elif AUTH is True:
        token = userToken()
    elif AUTH:
        token = AUTH
    else:
        # If we're not authenticating, no suffix is required.
        return ''

    # Build the api_sig: md5 of API_SECRET followed by every parameter
    # name/value pair, with the names processed in sorted order.
    parameters = ['API_KEY', 'method', 'auth_token']
    parameters.extend(params.keys())
    parameters.sort()

    api_string = [API_SECRET]
    for item in parameters:
        for name, value in params.items():
            if item == name:
                api_string.append(name)
                api_string.append(str(value))
        if item == 'method':
            api_string.append('method')
            api_string.append(method)
        if item == 'API_KEY':
            api_string.append('api_key')
            api_string.append(API_KEY)
        if item == 'auth_token':
            api_string.append('auth_token')
            api_string.append(token)

    api_signature = hashlib.md5(''.join(api_string)).hexdigest()
    return '&auth_token=%s&api_sig=%s' % (token, api_signature)
def _parse_photo(photo):
    """Create a Photo object from photo data."""
    return Photo(photo.id,
                 owner=User(photo.owner),
                 title=photo.title,
                 ispublic=photo.ispublic,
                 isfriend=photo.isfriend,
                 isfamily=photo.isfamily,
                 secret=photo.secret,
                 server=photo.server)
#stolen methods
class Bag: pass
#unmarshal taken and modified from pyamazon.py
#makes the xml easy to work with
def unmarshal(element):
    """Recursively convert a minidom tree into nested Bag objects.

    XML attributes become Bag attributes, repeated child tags collapse
    into lists, and leaf elements keep their character data under `.text`.
    (Taken and modified from pyamazon.py.)
    """
    rc = Bag()
    if isinstance(element, minidom.Element):
        for key in element.attributes.keys():
            setattr(rc, key, element.attributes[key].value)
        childElements = [e for e in element.childNodes \
                         if isinstance(e, minidom.Element)]
        if childElements:
            for child in childElements:
                key = child.tagName
                if hasattr(rc, key):
                    # Repeated tag: promote the existing value to a list
                    # and append.  (Fix: replaced the archaic `<>` operator
                    # with `!=`; `<>` is removed in Python 3.)
                    if type(getattr(rc, key)) != type([]):
                        setattr(rc, key, [getattr(rc, key)])
                    setattr(rc, key, getattr(rc, key) + [unmarshal(child)])
                elif isinstance(child, minidom.Element) and \
                     (child.tagName == 'Details'):
                    # make the first Details element a key
                    setattr(rc,key,[unmarshal(child)])
                    #dbg: because otherwise 'hasattr' only tests
                    #dbg: on the second occurence: if there's a
                    #dbg: single return to a query, it's not a
                    #dbg: list. This module should always
                    #dbg: return a list of Details objects.
                else:
                    setattr(rc, key, unmarshal(child))
        else:
            #jec: we'll have the main part of the element stored in .text
            #jec: will break if tag <text> is also present
            text = "".join([e.data for e in element.childNodes \
                            if isinstance(e, minidom.Text)])
            setattr(rc, 'text', text)
    return rc
#unique items from a list from the cookbook
def uniq(alist): # Fastest without order preserving
    """Return the unique items of `alist` (order not guaranteed).

    Fix: the old version shadowed the builtin `set` with a dict and abused
    map() for its side effects; dict.fromkeys is the idiomatic equivalent.
    """
    return list(dict.fromkeys(alist))
## Only the "getList" module is complete.
## Work in Progress; Nearly Finished
class Blogs():
    """Partial wrapper for the flickr.blogs API (only getList is complete)."""

    def getList(self,auth=True):
        """blogs.getList requires READ authentication

        Returns four parallel lists [ids, names, needs-password flags,
        urls], or an explanatory string when something goes wrong.
        """
        # please read documentation on how to use this
        method = 'flickr.blogs.getList'
        if auth==True : data = _doget(method, auth=True)
        if not auth==True : data = _doget(method, auth=False)
        bID = []
        bName = []
        bNeedsPword = []
        bURL = []
        try:
            for plog in data.rsp.blogs.blog:
                bID.append(plog.id)
                bName.append(plog.name)
                bNeedsPword.append(plog.needspassword)
                bURL.append(plog.url)
        except TypeError:
            # A single blog comes back as a bare element (not iterable).
            try:
                bID.append(data.rsp.blogs.blog.id)
                bName.append(data.rsp.blogs.blog.name)
                bNeedsPword.append(data.rsp.blogs.blog.needspassword)
                bURL.append(data.rsp.blogs.blog.url)
            except AttributeError:
                return "AttributeError, unexplained!"
            except:
                return "Unknown error!"
        except AttributeError:
            return "There are no blogs!"
        myReturn = [bID,bName,bNeedsPword,bURL]
        return myReturn

    def postPhoto(self, blogID, photoID, title, description, bpassword):
        """blogs.postPhoto requires WRITE authentication"""
        # NOTE(review): unimplemented stub — always returns None.
        method = 'flickr.blogs.postPhoto'
        return None
class Urls():
    """Thin wrapper for the flickr.urls API."""

    def getUserPhotosURL(self, userid):
        """Returns user URL in an array (to access, use array[1])

        Bug fix: the method was missing `self`, so calling it on a Urls
        instance passed the instance itself as `userid`.
        """
        method = 'flickr.urls.getUserPhotos'
        data = _doget(method, user_id=userid)
        return [data.rsp.user.nsid, data.rsp.user.url]
class Auth():
    """Flickr web authentication flow: getFrob -> user visits loginLink ->
    getToken."""

    def getFrob(self):
        """Returns a frob that is used in authentication"""
        method = 'flickr.auth.getFrob'
        # Sign the call: md5 over the secret plus name/value pairs.
        sig_str = API_SECRET + 'api_key' + API_KEY + 'method' + method
        signature_hash = hashlib.md5(sig_str).hexdigest()
        data = _doget(method, auth=False, api_sig=signature_hash)
        return data.rsp.frob.text

    def loginLink(self, permission, frob):
        """Generates a link that the user should be sent to"""
        # Fix: removed an unused `myAuth = Auth()` local left over
        # from earlier editing.
        sig_str = API_SECRET + 'api_key' + API_KEY + 'frob' + frob + 'perms' + permission
        signature_hash = hashlib.md5(sig_str).hexdigest()
        link = "http://flickr.com/services/auth/?api_key=%s&perms=%s&frob=%s&api_sig=%s" % (API_KEY, permission, frob, signature_hash)
        return link

    def getToken(self, frob):
        """This token is what needs to be used in future API calls"""
        method = 'flickr.auth.getToken'
        sig_str = API_SECRET + 'api_key' + API_KEY + 'frob' + frob + 'method' + method
        signature_hash = hashlib.md5(sig_str).hexdigest()
        data = _doget(method, auth=False, api_sig=signature_hash,
                      api_key=API_KEY, frob=frob)
        return data.rsp.auth.token.text
def userToken():
    """Read the cached authentication token from disk.

    Once a token has been fetched from Flickr it cannot be fetched again,
    so flickr.py saves it in tokenPath/tokenFile (default: token.txt).

    Fix: uses open() in a with-block instead of the legacy file() builtin,
    so the handle is closed even if read() fails.
    """
    if not tokenPath == '':
        token_file = os.path.join(tokenPath, tokenFile)
    else:
        token_file = tokenFile
    with open(token_file, 'r') as f:
        return f.read()
def getUserPhotosURL(userid):
    """Returns user URL in an array (to access, use array[1])"""
    # Added upon request of nsteinmetz; duplicates Urls.getUserPhotosURL
    # and will be "cleaned up" at another time.
    data = _doget('flickr.urls.getUserPhotos', user_id=userid)
    return [data.rsp.user.nsid, data.rsp.user.url]
if __name__ == '__main__':
    # Smoke test: ping the Flickr API and print its status.
    print test_echo()
| Python |
#!/usr/bin/env python
import dbhash,anydbm
import sys, time, os, urllib2, shelve, string, logging, flickr, re
import xmltramp, mimetools, mimetypes, md5, webbrowser, exif, flickr2history, tags2set, deleteAll
from ConfigParser import *
import threading, Queue
#
# uploadr.py
#
# Upload images placed within a directory to your Flickr account.
#
# Requires:
# xmltramp http://www.aaronsw.com/2002/xmltramp/
# flickr account http://flickr.com
#
# Inspired by:
# http://micampe.it/things/flickruploadr
#
#
# September 2005
# Cameron Mallory cmallory/berserk.org
#
# This code has been updated to use the new Auth API from flickr.
#
# You may use this code however you see fit in any form whatsoever.
#
# 2009 Peter Kolarov - Updated with fixes and new functionality
#
#
# Runtime configuration comes from uploadr.ini; all keys live in [DEFAULT].
configdict = ConfigParser()
configdict.read('uploadr.ini')
#
# Location to scan for new images
#
IMAGE_DIR = configdict.defaults()['imagedir']
#
# Flickr settings (per-upload form fields; visibility flags come from the ini)
#
FLICKR = {"title": "",
          "description": "",
          "tags": "auto-upload",
          "is_public": configdict.defaults()['public'],
          "is_friend": configdict.defaults()['friend'],
          "is_family": configdict.defaults()['family'] }
#
# File we keep the history of uploaded images in.
#
HISTORY_FILE = configdict.defaults()['history_file']
NUM_THREADS = int(configdict.defaults()['num_threads'])
#Kodak cam EXIF tag keyword
XPKEYWORDS = 'Image XPKeywords'
##
## You shouldn't need to modify anything below here
##
FLICKR["secret" ] = "13c314caee8b1f31"
FLICKR["api_key" ] = "91dfde3ed605f6b8b9d9c38886547dcf"
# Push credentials and token-cache location into the flickr helper module.
flickr.API_KEY = FLICKR["api_key" ]
flickr.API_SECRET =FLICKR["secret" ]
flickr.tokenFile= ".flickrToken"
flickr.AUTH = True
class APIConstants:
    """Flickr web-service endpoints and standard request parameter names."""

    # Service endpoints, all rooted at the same base URL.
    base = "http://flickr.com/services/"
    rest = base + "rest/"
    auth = base + "auth/"
    upload = base + "upload/"

    # Names of the standard request parameters used when signing calls.
    token = "auth_token"
    secret = "secret"
    key = "api_key"
    sig = "api_sig"
    frob = "frob"
    perms = "perms"
    method = "method"

    def __init__(self):
        # Nothing to initialise -- every value is a class-level constant.
        pass

# Shared instance used throughout the script.
api = APIConstants()
class Uploadr:
    """Drives the upload: Flickr authentication, upload-history bookkeeping
    and handing images to UploadThread workers.

    NOTE(review): code is unchanged Python 2; only comments were added.
    Indentation reconstructed -- verify against the original file.
    """
    # Cached auth token / permission level; filled by getCachedToken()/getToken().
    token = None
    perms = ""
    # On-disk token cache path (flickr.tokenFile is set in the prologue above).
    TOKEN_FILE = flickr.tokenFile

    def __init__( self ):
        # Lock serialises access to the shared shelve history (has_key /
        # logUpload are called concurrently from multiple UploadThreads).
        self.lock = threading.Lock()
        self.token = self.getCachedToken()

    """
    Signs args via md5 per http://www.flickr.com/services/api/auth.spec.html (Section 8)
    """
    def signCall( self, data):
        # md5(secret + "api_key" + key + <args sorted by name, concatenated>).
        keys = data.keys()
        keys.sort()
        foo = ""
        for a in keys:
            foo += (a + data[a])
        f = FLICKR[ api.secret ] + api.key + FLICKR[ api.key ] + foo
        #f = api.key + FLICKR[ api.key ] + foo
        return md5.new( f ).hexdigest()

    def urlGen( self , base,data, sig ):
        # Build "<base>?k=v&...&api_key=<key>&api_sig=<sig>".
        # NOTE(review): values are not URL-escaped; assumes they are URL-safe.
        foo = base + "?"
        for d in data:
            foo += d + "=" + data[d] + "&"
        return foo + api.key + "=" + FLICKR[ api.key ] + "&" + api.sig + "=" + sig

    #
    # Authenticate user so we can upload images
    #
    def authenticate( self ):
        # Full browser-based auth dance: frob -> user approval -> token -> cache.
        #print "Getting new Token"
        self.getFrob()
        self.getAuthKey()
        self.getToken()
        self.cacheToken()

    """
    flickr.auth.getFrob
    Returns a frob to be used during authentication. This method call must be
    signed.
    This method does not require authentication.
    Arguments
    api.key (Required)
    Your API application key. See here for more details.
    """
    def getFrob( self ):
        d = {
            api.method : "flickr.auth.getFrob"
            }
        sig = self.signCall( d )
        url = self.urlGen( api.rest, d, sig )
        try:
            response = self.getResponse( url )
            if ( self.isGood( response ) ):
                # Stash the frob for the follow-up auth/token calls.
                FLICKR[ api.frob ] = str(response.frob)
            else:
                self.reportError( response )
        except:
            print "Error getting frob:" , str( sys.exc_info() )
            logging.error(sys.exc_info())

    """
    Checks to see if the user has authenticated this application
    """
    def getAuthKey( self ):
        # Opens the browser on the approval page and waits for the user.
        d = {
            api.frob : FLICKR[ api.frob ],
            api.perms : "delete"
            }
        sig = self.signCall( d )
        url = self.urlGen( api.auth, d, sig )
        ans = ""
        try:
            webbrowser.open( url )
            ans = raw_input("Have you authenticated this application? (Y/N): ")
        except:
            print str(sys.exc_info())
        if ( ans.lower() == "n" ):
            print "You need to allow this program to access your Flickr site."
            print "A web browser should pop open with instructions."
            print "After you have allowed access restart uploadr.py"
            sys.exit()

    """
    http://www.flickr.com/services/api/flickr.auth.getToken.html
    flickr.auth.getToken
    Returns the auth token for the given frob, if one has been attached. This method call must be signed.
    Authentication
    This method does not require authentication.
    Arguments
    NTC: We need to store the token in a file so we can get it and then check it insted of
    getting a new on all the time.
    api.key (Required)
    Your API application key. See here for more details.
    frob (Required)
    The frob to check.
    """
    def getToken( self ):
        d = {
            api.method : "flickr.auth.getToken",
            api.frob : str(FLICKR[ api.frob ])
            }
        sig = self.signCall( d )
        url = self.urlGen( api.rest, d, sig )
        try:
            res = self.getResponse( url )
            if ( self.isGood( res ) ):
                self.token = str(res.auth.token)
                self.perms = str(res.auth.perms)
                self.cacheToken()
            else :
                self.reportError( res )
        except:
            print str( sys.exc_info() )
            logging.error(sys.exc_info())

    """
    Attempts to get the flickr token from disk.
    """
    def getCachedToken( self ):
        # Returns the cached token string, or None when no cache file exists.
        if ( os.path.exists( self.TOKEN_FILE )):
            return open( self.TOKEN_FILE ).read()
        else :
            return None

    def cacheToken( self ):
        # Persist the current token; best-effort -- failure is only logged.
        try:
            open( self.TOKEN_FILE , "w").write( str(self.token) )
        except:
            print "Issue writing token to local cache " , str(sys.exc_info())
            logging.error(sys.exc_info())

    """
    flickr.auth.checkToken
    Returns the credentials attached to an authentication token.
    Authentication
    This method does not require authentication.
    Arguments
    api.key (Required)
    Your API application key. See here for more details.
    auth_token (Required)
    The authentication token to check.
    """
    def checkToken( self ):
        # True when the cached token is still valid on the server side.
        if ( self.token == None ):
            return False
        else :
            d = {
                api.token : str(self.token) ,
                api.method : "flickr.auth.checkToken"
                }
            sig = self.signCall( d )
            url = self.urlGen( api.rest, d, sig )
            try:
                res = self.getResponse( url )
                if ( self.isGood( res ) ):
                    self.token = res.auth.token
                    self.perms = res.auth.perms
                    return True
                else :
                    self.reportError( res )
            except:
                print str( sys.exc_info() )
                logging.error(sys.exc_info())
            return False

    def upload( self ):
        # Scan IMAGE_DIR, queue every image, then let NUM_THREADS workers
        # drain the queue; blocks until all workers have finished.
        print HISTORY_FILE
        self.uploaded = shelve.open( HISTORY_FILE )
        newImages = self.grabNewImages()
        imageQueue = Queue.Queue()
        for image in newImages:
            imageQueue.put_nowait(image)
        threads = []
        for i in range( NUM_THREADS ):
            thread = UploadThread(i, self, imageQueue)
            threads.append(thread)
            thread.start()
        for thrd in threads:
            thrd.join()

    #get all images in folders and subfolders which match extensions below
    def grabNewImages( self ):
        images = []
        foo = os.walk( IMAGE_DIR )
        for data in foo:
            (dirpath, dirnames, filenames) = data
            for f in filenames :
                ext = f.lower().split(".")[-1]
                if ext in ("jpg", "jpeg", "gif", "png", "avi", "mov", "mp4"):
                    images.append( os.path.normpath( dirpath + "/" + f ) )
        images.sort()
        return images

    def has_key(self, folderTag):
        # Thread-safe membership test against the upload-history shelve.
        with self.lock:
            return self.uploaded.has_key(folderTag)

    def logUpload( self, photoID, imageName ):
        # Record the upload both ways (name -> id and id -> name); closing
        # and reopening the shelve flushes every upload to disk immediately.
        photoID = str( photoID )
        imageName = str( imageName )
        with self.lock:
            self.uploaded[ imageName ] = photoID
            self.uploaded[ photoID ] = imageName
            self.uploaded.close()
            self.uploaded = shelve.open( HISTORY_FILE )

    #
    #
    # build_request/encode_multipart_formdata code is from www.voidspace.org.uk/atlantibots/pythonutils.html
    #
    #
    def build_request(self, theurl, fields, files, txheaders=None):
        """
        Given the fields to set and the files to encode it returns a fully formed urllib2.Request object.
        You can optionally pass in additional headers to encode into the opject. (Content-type and Content-length will be overridden if they are set).
        fields is a sequence of (name, value) elements for regular form fields - or a dictionary.
        files is a sequence of (name, filename, value) elements for data to be uploaded as files.
        """
        content_type, body = self.encode_multipart_formdata(fields, files)
        if not txheaders: txheaders = {}
        txheaders['Content-type'] = content_type
        txheaders['Content-length'] = str(len(body))
        return urllib2.Request(theurl, body, txheaders)

    def encode_multipart_formdata(self,fields, files, BOUNDARY = '-----'+mimetools.choose_boundary()+'-----'):
        """ Encodes fields and files for uploading.
        fields is a sequence of (name, value) elements for regular form fields - or a dictionary.
        files is a sequence of (name, filename, value) elements for data to be uploaded as files.
        Return (content_type, body) ready for urllib2.Request instance
        You can optionally pass in a boundary string to use or we'll let mimetools provide one.
        """
        CRLF = '\r\n'
        L = []
        if isinstance(fields, dict):
            fields = fields.items()
        for (key, value) in fields:
            L.append('--' + BOUNDARY)
            L.append('Content-Disposition: form-data; name="%s"' % key)
            L.append('')
            L.append(value)
        for (key, filename, value) in files:
            filetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            L.append('--' + BOUNDARY)
            L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
            L.append('Content-Type: %s' % filetype)
            L.append('')
            L.append(value)
        L.append('--' + BOUNDARY + '--')
        L.append('')
        body = CRLF.join(L)
        content_type = 'multipart/form-data; boundary=%s' % BOUNDARY # XXX what if no files are encoded
        return content_type, body

    def isGood( self, res ):
        # A response is good when non-empty and its 'stat' attribute is "ok".
        if ( not res == "" and res('stat') == "ok" ):
            return True
        else :
            return False

    def reportError( self, res ):
        # Log the raw response; fall back to printing it whole if the
        # err('code')/err('msg') attributes are missing.
        logging.error(res)
        try:
            print "Error:", str( res.err('code') + " " + res.err('msg') )
        except:
            print "Error: " + str( res )

    """
    Send the url and get a response. Let errors float up
    """
    def getResponse( self, url ):
        xml = urllib2.urlopen( url ).read()
        return xmltramp.parse( xml )
class UploadThread (threading.Thread):
    """Worker thread: drains the shared image queue and uploads each file.

    NOTE(review): code is unchanged Python 2; only comments were added.
    """
    def __init__(self, threadID, upl, imageQueue):
        threading.Thread.__init__(self)
        self.threadID = threadID
        # The owning Uploadr: supplies token/perms, signing, request building
        # and thread-safe history logging.
        self.upl = upl
        self.imageQueue = imageQueue

    def uploadImage( self, image ):
        # Derive the history key / tags from the path relative to IMAGE_DIR,
        # skip images already in the history, then POST the file to Flickr.
        folderTag = image[len(IMAGE_DIR):]
        if ( not self.upl.has_key( folderTag ) ):
            try:
                logging.debug( "UploadThread %d Getting EXIF for %s" % (self.threadID, image))
                f = open(image, 'rb')
                exiftags = exif.process_file(f)
                f.close()
                #print exiftags[XPKEYWORDS]
                #print folderTag
                #make one tag equal to original file path with spaces replaced by # and start it with # (for easier recognition) since space is used as TAG separator by flickr
                # this is needed for later syncing flickr with folders
                realTags = folderTag.replace('\\',' ') # look for / or \ or _ or . and replace them with SPACE to make real Tags
                realTags = realTags.replace('/',' ') # these will be the real tags ripped from folders
                realTags = realTags.replace('_',' ')
                realTags = realTags.replace('.',' ')
                picTags = '#' + folderTag.replace(' ','#') + ' ' + realTags
                if exiftags == {}:
                    logging.debug( 'UploadThread %d NO_EXIF_HEADER for %s' % (self.threadID, image))
                else:
                    if XPKEYWORDS in exiftags: #look for additional tags in EXIF to tag picture with
                        if len(exiftags[XPKEYWORDS].printable) > 4:
                            # NOTE(review): eval() of EXIF data is unsafe if
                            # images can come from an untrusted source.
                            picTags += exif.make_string( eval(exiftags[XPKEYWORDS].printable)).replace(';',' ')
                #print picTags
                logging.debug( "UploadThread %d Uploading image %s" % (self.threadID, image))
                photo = ('photo', image, open(image,'rb').read())
                d = {
                    api.token : str(self.upl.token),
                    api.perms : str(self.upl.perms),
                    "tags" : str(picTags),
                    "is_public" : str( FLICKR["is_public"] ),
                    "is_friend" : str( FLICKR["is_friend"] ),
                    "is_family" : str( FLICKR["is_family"] )
                    }
                sig = self.upl.signCall( d )
                d[ api.sig ] = sig
                d[ api.key ] = FLICKR[ api.key ]
                url = self.upl.build_request(api.upload, d, (photo,))
                xml = urllib2.urlopen( url ).read()
                res = xmltramp.parse(xml)
                if ( self.upl.isGood( res ) ):
                    logging.debug( "successful.")
                    self.upl.logUpload( res.photoid, folderTag )
                else :
                    print "problem.."
                    self.upl.reportError( res )
            except:
                logging.error(sys.exc_info())

    def run(self):
        logging.debug("Starting UploadThread %d " % self.threadID)
        while True:
            try:
                # get_nowait raises Queue.Empty once the queue is drained,
                # which is the worker's termination signal.
                image = self.imageQueue.get_nowait()
                logging.debug("UploadThread %d qSize: %d processing %s" % (self.threadID, self.imageQueue.qsize(), image))
                self.uploadImage(image)
            except Queue.Empty:
                break
        logging.debug("Exiting UploadThread %d " % self.threadID)
if __name__ == "__main__":
    # Full debug trace goes to debug.log; errors are duplicated to error.log.
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s',
                        filename='debug.log',
                        filemode='w')
    logging.debug('Started')
    console = logging.FileHandler('error.log')
    console.setLevel(logging.ERROR)
    logging.getLogger('').addHandler(console)
    # NOTE(review): this rebinds the imported ``flickr`` module name to an
    # Uploadr instance; it works only because the module's attributes
    # (API_KEY, tokenFile, ...) were consumed earlier in this file.
    flickr = Uploadr()
    if ( not flickr.checkToken() ):
        flickr.authenticate()
    #see if we need to wipe flickr account first
    if(configdict.defaults()['remove_all_pics_first'].startswith('true')):
        deleteAll.deleteAllPics()
        os._exit(1) ## STOP HERE after deleting all media so user has chance to turn off switch before next start
    images = flickr.grabNewImages()
    #this is just double checking if everything is on Flickr what is in the history file
    # in another words it will restore history file if deleted by comparing flickr with folders
    flickr2history.reshelf(images, IMAGE_DIR, HISTORY_FILE, NUM_THREADS)
    #uploads all images that are in folders and not in history file
    flickr.upload() #uploads all new images to flickr
    #this will organize uploaded files into sets with the names according to tags
    tags2set.createSets( HISTORY_FILE)
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Library to extract EXIF information from digital camera image files
# http://sourceforge.net/projects/exif-py/
#
# VERSION 1.1.0
#
# To use this library call with:
# f = open(path_name, 'rb')
# tags = EXIF.process_file(f)
#
# To ignore MakerNote tags, pass the -q or --quick
# command line arguments, or as
# tags = EXIF.process_file(f, details=False)
#
# To stop processing after a certain tag is retrieved,
# pass the -t TAG or --stop-tag TAG argument, or as
# tags = EXIF.process_file(f, stop_tags=('TAG1','TAG2'))
#
# where TAG is a valid tag name, ex 'DateTimeOriginal'
#
# These 2 are useful when you are retrieving a large list of images
#
#
# To return an error on invalid tags,
# pass the -s or --strict argument, or as
# tags = EXIF.process_file(f, strict=True)
#
# Otherwise these tags will be ignored
#
# Returned tags will be a dictionary mapping names of EXIF tags to their
# values in the file named by path_name. You can process the tags
# as you wish. In particular, you can iterate through all the tags with:
# for tag in tags.keys():
# if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename',
# 'EXIF MakerNote'):
# print "Key: %s, value %s" % (tag, tags[tag])
# (This code uses the if statement to avoid printing out a few of the
# tags that tend to be long or boring.)
#
# The tags dictionary will include keys for all of the usual EXIF
# tags, and will also include keys for Makernotes used by some
# cameras, for which we have a good specification.
#
# Note that the dictionary keys are the IFD name followed by the
# tag name. For example:
# 'EXIF DateTimeOriginal', 'Image Orientation', 'MakerNote FocusMode'
#
# Copyright (c) 2002-2007 Gene Cash All rights reserved
# Copyright (c) 2007-2008 Ianaré Sévi All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# ----- See 'changes.txt' file for all contributors and changes ----- #
#
# Don't throw an exception when given an out of range character.
def make_string(seq):
    """Render a sequence of character codes as a printable string.

    Codes outside 32..255 are dropped; if nothing printable remains, the
    original sequence is returned unchanged so callers still see the raw
    value.
    """
    # Renamed the accumulator: the original used ``str`` as a local,
    # shadowing the builtin.
    printable = ''
    for c in seq:
        # Screen out non-printing characters
        if 32 <= c < 256:
            printable += chr(c)
    # If no printing chars
    if not printable:
        return seq
    return printable
# Special version to deal with the code in the first 8 bytes of a user comment.
# First 8 bytes gives coding system e.g. ASCII vs. JIS vs Unicode
def make_string_uc(seq):
    # Discard the 8-byte coding-system marker and render the payload as if
    # it were ASCII.  Of course, this is only correct for ASCII -- the
    # standard explicitly allows JIS and Unicode as well.
    return make_string(seq[8:])
# field type descriptions as (length, abbreviation, full name) tuples
# Indexed by the EXIF field-type code; slot 0 is a placeholder because
# valid type codes start at 1.
FIELD_TYPES = (
    (0, 'X', 'Proprietary'), # no such type
    (1, 'B', 'Byte'),
    (1, 'A', 'ASCII'),
    (2, 'S', 'Short'),
    (4, 'L', 'Long'),
    (8, 'R', 'Ratio'),
    (1, 'SB', 'Signed Byte'),
    (1, 'U', 'Undefined'),
    (2, 'SS', 'Signed Short'),
    (4, 'SL', 'Signed Long'),
    (8, 'SR', 'Signed Ratio'),
    )
# dictionary of main EXIF tag names
# first element of tuple is tag name, optional second element is
# another dictionary giving names to values
EXIF_TAGS = {
    0x0100: ('ImageWidth', ),
    0x0101: ('ImageLength', ),
    0x0102: ('BitsPerSample', ),
    0x0103: ('Compression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed'}),
    0x0106: ('PhotometricInterpretation', ),
    0x0107: ('Thresholding', ),
    0x010A: ('FillOrder', ),
    0x010D: ('DocumentName', ),
    0x010E: ('ImageDescription', ),
    0x010F: ('Make', ),
    0x0110: ('Model', ),
    0x0111: ('StripOffsets', ),
    0x0112: ('Orientation',
             {1: 'Horizontal (normal)',
              2: 'Mirrored horizontal',
              3: 'Rotated 180',
              4: 'Mirrored vertical',
              5: 'Mirrored horizontal then rotated 90 CCW',
              6: 'Rotated 90 CW',
              7: 'Mirrored horizontal then rotated 90 CW',
              8: 'Rotated 90 CCW'}),
    0x0115: ('SamplesPerPixel', ),
    0x0116: ('RowsPerStrip', ),
    0x0117: ('StripByteCounts', ),
    0x011A: ('XResolution', ),
    0x011B: ('YResolution', ),
    0x011C: ('PlanarConfiguration', ),
    0x011D: ('PageName', make_string),
    0x0128: ('ResolutionUnit',
             {1: 'Not Absolute',
              2: 'Pixels/Inch',
              3: 'Pixels/Centimeter'}),
    0x012D: ('TransferFunction', ),
    0x0131: ('Software', ),
    0x0132: ('DateTime', ),
    0x013B: ('Artist', ),
    0x013E: ('WhitePoint', ),
    0x013F: ('PrimaryChromaticities', ),
    0x0156: ('TransferRange', ),
    0x0200: ('JPEGProc', ),
    0x0201: ('JPEGInterchangeFormat', ),
    0x0202: ('JPEGInterchangeFormatLength', ),
    0x0211: ('YCbCrCoefficients', ),
    0x0212: ('YCbCrSubSampling', ),
    0x0213: ('YCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0214: ('ReferenceBlackWhite', ),
    0x4746: ('Rating', ),
    0x828D: ('CFARepeatPatternDim', ),
    0x828E: ('CFAPattern', ),
    0x828F: ('BatteryLevel', ),
    0x8298: ('Copyright', ),
    0x829A: ('ExposureTime', ),
    0x829D: ('FNumber', ),
    0x83BB: ('IPTC/NAA', ),
    0x8769: ('ExifOffset', ),
    0x8773: ('InterColorProfile', ),
    0x8822: ('ExposureProgram',
             {0: 'Unidentified',
              1: 'Manual',
              2: 'Program Normal',
              3: 'Aperture Priority',
              4: 'Shutter Priority',
              5: 'Program Creative',
              6: 'Program Action',
              7: 'Portrait Mode',
              8: 'Landscape Mode'}),
    0x8824: ('SpectralSensitivity', ),
    0x8825: ('GPSInfo', ),
    0x8827: ('ISOSpeedRatings', ),
    0x8828: ('OECF', ),
    0x9000: ('ExifVersion', make_string),
    0x9003: ('DateTimeOriginal', ),
    0x9004: ('DateTimeDigitized', ),
    0x9101: ('ComponentsConfiguration',
             {0: '',
              1: 'Y',
              2: 'Cb',
              3: 'Cr',
              4: 'Red',
              5: 'Green',
              6: 'Blue'}),
    0x9102: ('CompressedBitsPerPixel', ),
    0x9201: ('ShutterSpeedValue', ),
    0x9202: ('ApertureValue', ),
    0x9203: ('BrightnessValue', ),
    0x9204: ('ExposureBiasValue', ),
    0x9205: ('MaxApertureValue', ),
    0x9206: ('SubjectDistance', ),
    0x9207: ('MeteringMode',
             {0: 'Unidentified',
              1: 'Average',
              2: 'CenterWeightedAverage',
              3: 'Spot',
              4: 'MultiSpot',
              5: 'Pattern'}),
    0x9208: ('LightSource',
             {0: 'Unknown',
              1: 'Daylight',
              2: 'Fluorescent',
              3: 'Tungsten',
              9: 'Fine Weather',
              10: 'Flash',
              11: 'Shade',
              12: 'Daylight Fluorescent',
              13: 'Day White Fluorescent',
              14: 'Cool White Fluorescent',
              15: 'White Fluorescent',
              17: 'Standard Light A',
              18: 'Standard Light B',
              19: 'Standard Light C',
              20: 'D55',
              21: 'D65',
              22: 'D75',
              255: 'Other'}),
    0x9209: ('Flash',
             {0: 'No',
              1: 'Fired',
              5: 'Fired (?)', # no return sensed
              7: 'Fired (!)', # return sensed
              9: 'Fill Fired',
              13: 'Fill Fired (?)',
              15: 'Fill Fired (!)',
              16: 'Off',
              24: 'Auto Off',
              25: 'Auto Fired',
              29: 'Auto Fired (?)',
              31: 'Auto Fired (!)',
              32: 'Not Available'}),
    0x920A: ('FocalLength', ),
    0x9214: ('SubjectArea', ),
    0x927C: ('MakerNote', ),
    0x9286: ('UserComment', make_string_uc),
    0x9290: ('SubSecTime', ),
    0x9291: ('SubSecTimeOriginal', ),
    0x9292: ('SubSecTimeDigitized', ),
    # used by Windows Explorer
    0x9C9B: ('XPTitle', ),
    0x9C9C: ('XPComment', ),
    0x9C9D: ('XPAuthor', ), #(ignored by Windows Explorer if Artist exists)
    0x9C9E: ('XPKeywords', ),
    0x9C9F: ('XPSubject', ),
    0xA000: ('FlashPixVersion', make_string),
    0xA001: ('ColorSpace',
             {1: 'sRGB',
              2: 'Adobe RGB',
              65535: 'Uncalibrated'}),
    0xA002: ('ExifImageWidth', ),
    0xA003: ('ExifImageLength', ),
    0xA005: ('InteroperabilityOffset', ),
    0xA20B: ('FlashEnergy', ), # 0x920B in TIFF/EP
    0xA20C: ('SpatialFrequencyResponse', ), # 0x920C
    0xA20E: ('FocalPlaneXResolution', ), # 0x920E
    0xA20F: ('FocalPlaneYResolution', ), # 0x920F
    0xA210: ('FocalPlaneResolutionUnit', ), # 0x9210
    0xA214: ('SubjectLocation', ), # 0x9214
    0xA215: ('ExposureIndex', ), # 0x9215
    0xA217: ('SensingMethod', # 0x9217
             {1: 'Not defined',
              2: 'One-chip color area',
              3: 'Two-chip color area',
              4: 'Three-chip color area',
              5: 'Color sequential area',
              7: 'Trilinear',
              8: 'Color sequential linear'}),
    0xA300: ('FileSource',
             {1: 'Film Scanner',
              2: 'Reflection Print Scanner',
              3: 'Digital Camera'}),
    0xA301: ('SceneType',
             {1: 'Directly Photographed'}),
    0xA302: ('CVAPattern', ),
    0xA401: ('CustomRendered',
             {0: 'Normal',
              1: 'Custom'}),
    0xA402: ('ExposureMode',
             {0: 'Auto Exposure',
              1: 'Manual Exposure',
              2: 'Auto Bracket'}),
    0xA403: ('WhiteBalance',
             {0: 'Auto',
              1: 'Manual'}),
    0xA404: ('DigitalZoomRatio', ),
    0xA405: ('FocalLengthIn35mmFilm', ),
    0xA406: ('SceneCaptureType',
             {0: 'Standard',
              1: 'Landscape',
              2: 'Portrait',
              3: 'Night'}), # BUG FIX: was 'Night)' with a stray parenthesis
    0xA407: ('GainControl',
             {0: 'None',
              1: 'Low gain up',
              2: 'High gain up',
              3: 'Low gain down',
              4: 'High gain down'}),
    0xA408: ('Contrast',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA409: ('Saturation',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40A: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40B: ('DeviceSettingDescription', ),
    0xA40C: ('SubjectDistanceRange', ),
    0xA500: ('Gamma', ),
    0xC4A5: ('PrintIM', ),
    0xEA1C: ('Padding', ),
    }
# interoperability tags
# Maps Interoperability-IFD tag id -> (tag name,).
INTR_TAGS = {
    0x0001: ('InteroperabilityIndex', ),
    0x0002: ('InteroperabilityVersion', ),
    0x1000: ('RelatedImageFileFormat', ),
    0x1001: ('RelatedImageWidth', ),
    0x1002: ('RelatedImageLength', ),
    }
# GPS tags (not used yet, haven't seen camera with GPS)
# Maps GPS-IFD tag id -> (tag name,).
GPS_TAGS = {
    0x0000: ('GPSVersionID', ),
    0x0001: ('GPSLatitudeRef', ),
    0x0002: ('GPSLatitude', ),
    0x0003: ('GPSLongitudeRef', ),
    0x0004: ('GPSLongitude', ),
    0x0005: ('GPSAltitudeRef', ),
    0x0006: ('GPSAltitude', ),
    0x0007: ('GPSTimeStamp', ),
    0x0008: ('GPSSatellites', ),
    0x0009: ('GPSStatus', ),
    0x000A: ('GPSMeasureMode', ),
    0x000B: ('GPSDOP', ),
    0x000C: ('GPSSpeedRef', ),
    0x000D: ('GPSSpeed', ),
    0x000E: ('GPSTrackRef', ),
    0x000F: ('GPSTrack', ),
    0x0010: ('GPSImgDirectionRef', ),
    0x0011: ('GPSImgDirection', ),
    0x0012: ('GPSMapDatum', ),
    0x0013: ('GPSDestLatitudeRef', ),
    0x0014: ('GPSDestLatitude', ),
    0x0015: ('GPSDestLongitudeRef', ),
    0x0016: ('GPSDestLongitude', ),
    0x0017: ('GPSDestBearingRef', ),
    0x0018: ('GPSDestBearing', ),
    0x0019: ('GPSDestDistanceRef', ),
    0x001A: ('GPSDestDistance', ),
    0x001D: ('GPSDate', ),
    }
# Ignore these tags when quick processing
# 0x927C is MakerNote Tags
# 0x9286 is user comment
IGNORE_TAGS=(0x9286, 0x927C)
# http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp
def nikon_ev_bias(seq):
    """Decode a Nikon EV-bias MakerNote value into a string like "-2/3 EV".

    ``seq`` is a 4-element value [numerator, 1, step, 0].  The first digit
    seems to be in steps of 1/6 EV.  Does the third value mean the step
    size?  It is usually 6, but it is 12 for the ExposureDifference.
    Returns "" for malformed (too short) input.
    """
    # Check for an error condition that could cause a crash.
    # This only happens if something has gone really wrong in
    # reading the Nikon MakerNote.
    if len( seq ) < 4 : return ""
    # Common values handled straight from a lookup table.
    if seq == [252, 1, 6, 0]:
        return "-2/3 EV"
    if seq == [253, 1, 6, 0]:
        return "-1/2 EV"
    if seq == [254, 1, 6, 0]:
        return "-1/3 EV"
    if seq == [0, 1, 6, 0]:
        return "0 EV"
    if seq == [2, 1, 6, 0]:
        return "+1/3 EV"
    if seq == [3, 1, 6, 0]:
        return "+1/2 EV"
    if seq == [4, 1, 6, 0]:
        return "+2/3 EV"
    # Handle combinations not in the table.
    a = seq[0]
    # Causes headaches for the +/- logic, so special case it.
    if a == 0:
        return "0 EV"
    if a > 127:
        # Values above 127 are negative in two's complement.
        a = 256 - a
        ret_str = "-"
    else:
        ret_str = "+"
    b = seq[2]  # Assume third value means the step size
    # BUG FIX: floor division. Plain "/" relied on Python 2 integer
    # division; under Python 3 it yields a float and strings like "+1.0 EV".
    whole = a // b
    a = a % b
    if whole != 0:
        ret_str = ret_str + str(whole) + " "
    if a == 0:
        ret_str = ret_str + "EV"
    else:
        # Ratio is defined elsewhere in this module (exif.py).
        r = Ratio(a, b)
        ret_str = ret_str + r.__repr__() + " EV"
    return ret_str
# Nikon E99x MakerNote Tags
# Maps Nikon MakerNote tag id -> (name[, value-decoder-or-table]).
MAKERNOTE_NIKON_NEWER_TAGS={
    0x0001: ('MakernoteVersion', make_string), # Sometimes binary
    0x0002: ('ISOSetting', make_string),
    0x0003: ('ColorMode', ),
    0x0004: ('Quality', ),
    0x0005: ('Whitebalance', ),
    0x0006: ('ImageSharpening', ),
    0x0007: ('FocusMode', ),
    0x0008: ('FlashSetting', ),
    0x0009: ('AutoFlashMode', ),
    0x000B: ('WhiteBalanceBias', ),
    0x000C: ('WhiteBalanceRBCoeff', ),
    0x000D: ('ProgramShift', nikon_ev_bias),
    # Nearly the same as the other EV vals, but step size is 1/12 EV (?)
    0x000E: ('ExposureDifference', nikon_ev_bias),
    0x000F: ('ISOSelection', ),
    0x0011: ('NikonPreview', ),
    0x0012: ('FlashCompensation', nikon_ev_bias),
    0x0013: ('ISOSpeedRequested', ),
    0x0016: ('PhotoCornerCoordinates', ),
    # 0x0017: Unknown, but most likely an EV value
    0x0018: ('FlashBracketCompensationApplied', nikon_ev_bias),
    0x0019: ('AEBracketCompensationApplied', ),
    0x001A: ('ImageProcessing', ),
    0x001B: ('CropHiSpeed', ),
    0x001D: ('SerialNumber', ), # Conflict with 0x00A0 ?
    0x001E: ('ColorSpace', ),
    0x001F: ('VRInfo', ),
    0x0020: ('ImageAuthentication', ),
    0x0022: ('ActiveDLighting', ),
    0x0023: ('PictureControl', ),
    0x0024: ('WorldTime', ),
    0x0025: ('ISOInfo', ),
    0x0080: ('ImageAdjustment', ),
    0x0081: ('ToneCompensation', ),
    0x0082: ('AuxiliaryLens', ),
    0x0083: ('LensType', ),
    0x0084: ('LensMinMaxFocalMaxAperture', ),
    0x0085: ('ManualFocusDistance', ),
    0x0086: ('DigitalZoomFactor', ),
    0x0087: ('FlashMode',
             {0x00: 'Did Not Fire',
              0x01: 'Fired, Manual',
              0x07: 'Fired, External',
              0x08: 'Fired, Commander Mode ',
              0x09: 'Fired, TTL Mode'}),
    0x0088: ('AFFocusPosition',
             {0x0000: 'Center',
              0x0100: 'Top',
              0x0200: 'Bottom',
              0x0300: 'Left',
              0x0400: 'Right'}),
    0x0089: ('BracketingMode',
             {0x00: 'Single frame, no bracketing',
              0x01: 'Continuous, no bracketing',
              0x02: 'Timer, no bracketing',
              0x10: 'Single frame, exposure bracketing',
              0x11: 'Continuous, exposure bracketing',
              0x12: 'Timer, exposure bracketing',
              0x40: 'Single frame, white balance bracketing',
              0x41: 'Continuous, white balance bracketing',
              0x42: 'Timer, white balance bracketing'}),
    0x008A: ('AutoBracketRelease', ),
    0x008B: ('LensFStops', ),
    0x008C: ('NEFCurve1', ), # ExifTool calls this 'ContrastCurve'
    0x008D: ('ColorMode', ),
    0x008F: ('SceneMode', ),
    0x0090: ('LightingType', ),
    0x0091: ('ShotInfo', ), # First 4 bytes are a version number in ASCII
    0x0092: ('HueAdjustment', ),
    # ExifTool calls this 'NEFCompression', should be 1-4
    0x0093: ('Compression', ),
    0x0094: ('Saturation',
             {-3: 'B&W',
              -2: '-2',
              -1: '-1',
              0: '0',
              1: '1',
              2: '2'}),
    0x0095: ('NoiseReduction', ),
    0x0096: ('NEFCurve2', ), # ExifTool calls this 'LinearizationTable'
    0x0097: ('ColorBalance', ), # First 4 bytes are a version number in ASCII
    0x0098: ('LensData', ), # First 4 bytes are a version number in ASCII
    0x0099: ('RawImageCenter', ),
    0x009A: ('SensorPixelSize', ),
    0x009C: ('Scene Assist', ),
    0x009E: ('RetouchHistory', ),
    0x00A0: ('SerialNumber', ),
    0x00A2: ('ImageDataSize', ),
    # 00A3: unknown - a single byte 0
    # 00A4: In NEF, looks like a 4 byte ASCII version number ('0200')
    0x00A5: ('ImageCount', ),
    0x00A6: ('DeletedImageCount', ),
    0x00A7: ('TotalShutterReleases', ),
    # First 4 bytes are a version number in ASCII, with version specific
    # info to follow. Its hard to treat it as a string due to embedded nulls.
    0x00A8: ('FlashInfo', ),
    0x00A9: ('ImageOptimization', ),
    0x00AA: ('Saturation', ),
    0x00AB: ('DigitalVariProgram', ),
    0x00AC: ('ImageStabilization', ),
    0x00AD: ('Responsive AF', ), # 'AFResponse'
    0x00B0: ('MultiExposure', ),
    0x00B1: ('HighISONoiseReduction', ),
    0x00B7: ('AFInfo', ),
    0x00B8: ('FileInfo', ),
    # 00B9: unknown
    0x0100: ('DigitalICE', ),
    0x0103: ('PreviewCompression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed',}),
    0x0201: ('PreviewImageStart', ),
    0x0202: ('PreviewImageLength', ),
    0x0213: ('PreviewYCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0010: ('DataDump', ),
    }
# MakerNote tags for older Nikon models; same shape as the newer table.
MAKERNOTE_NIKON_OLDER_TAGS = {
    0x0003: ('Quality',
             {1: 'VGA Basic',
              2: 'VGA Normal',
              3: 'VGA Fine',
              4: 'SXGA Basic',
              5: 'SXGA Normal',
              6: 'SXGA Fine'}),
    0x0004: ('ColorMode',
             {1: 'Color',
              2: 'Monochrome'}),
    0x0005: ('ImageAdjustment',
             {0: 'Normal',
              1: 'Bright+',
              2: 'Bright-',
              3: 'Contrast+',
              4: 'Contrast-'}),
    0x0006: ('CCDSpeed',
             {0: 'ISO 80',
              2: 'ISO 160',
              4: 'ISO 320',
              5: 'ISO 100'}),
    0x0007: ('WhiteBalance',
             {0: 'Auto',
              1: 'Preset',
              2: 'Daylight',
              3: 'Incandescent',
              4: 'Fluorescent',
              5: 'Cloudy',
              6: 'Speed Light'}),
    }
# decode Olympus SpecialMode tag in MakerNote
def olympus_special_mode(v):
    """Render an Olympus SpecialMode value (mode, sequence, direction).

    Returns the raw value unchanged when either code is unrecognised.
    """
    mode_names = {
        0: 'Normal',
        1: 'Unknown',
        2: 'Fast',
        3: 'Panorama'}
    directions = {
        0: 'Non-panoramic',
        1: 'Left to right',
        2: 'Right to left',
        3: 'Bottom to top',
        4: 'Top to bottom'}
    mode, sequence, direction = v[0], v[1], v[2]
    if mode not in mode_names or direction not in directions:
        return v
    return '%s - sequence %d - %s' % (mode_names[mode], sequence, directions[direction])
# Olympus makernote tags: tag ID -> (name, optional decoder).  The decoder is
# either a value->text dict or a callable applied to the raw value list.
MAKERNOTE_OLYMPUS_TAGS={
    # ah HAH! those sneeeeeaky bastids! this is how they get past the fact
    # that a JPEG thumbnail is not allowed in an uncompressed TIFF file
    0x0100: ('JPEGThumbnail', ),
    0x0200: ('SpecialMode', olympus_special_mode),
    0x0201: ('JPEGQual',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ'}),
    0x0202: ('Macro',
             {0: 'Normal',
              1: 'Macro',
              2: 'SuperMacro'}),
    0x0203: ('BWMode',
             {0: 'Off',
              1: 'On'}),
    0x0204: ('DigitalZoom', ),
    0x0205: ('FocalPlaneDiagonal', ),
    0x0206: ('LensDistortionParams', ),
    0x0207: ('SoftwareRelease', ),
    0x0208: ('PictureInfo', ),
    0x0209: ('CameraID', make_string), # print as string
    0x0F00: ('DataDump', ),
    0x0300: ('PreCaptureFrames', ),
    0x0404: ('SerialNumber', ),
    0x1000: ('ShutterSpeedValue', ),
    0x1001: ('ISOValue', ),
    0x1002: ('ApertureValue', ),
    0x1003: ('BrightnessValue', ),
    # BUG FIX: 0x1004 appeared twice; the earlier bare ('FlashMode', ) entry
    # was silently discarded by the duplicate dict key, so only the decoded
    # form below was ever effective.  Keep the effective one.
    0x1004: ('FlashMode',
             {2: 'On',
              3: 'Off'}),
    0x1005: ('FlashDevice',
             {0: 'None',
              1: 'Internal',
              4: 'External',
              5: 'Internal + External'}),
    0x1006: ('ExposureCompensation', ),
    0x1007: ('SensorTemperature', ),
    0x1008: ('LensTemperature', ),
    0x100b: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1017: ('RedBalance', ),
    0x1018: ('BlueBalance', ),
    0x101a: ('SerialNumber', ),
    0x1023: ('FlashExposureComp', ),
    0x1026: ('ExternalFlashBounce',
             {0: 'No',
              1: 'Yes'}),
    0x1027: ('ExternalFlashZoom', ),
    0x1028: ('ExternalFlashMode', ),
    0x1029: ('Contrast int16u',
             {0: 'High',
              1: 'Normal',
              2: 'Low'}),
    0x102a: ('SharpnessFactor', ),
    0x102b: ('ColorControl', ),
    0x102c: ('ValidBits', ),
    0x102d: ('CoringFilter', ),
    0x102e: ('OlympusImageWidth', ),
    0x102f: ('OlympusImageHeight', ),
    0x1034: ('CompressionRatio', ),
    0x1035: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x1036: ('PreviewImageStart', ),
    0x1037: ('PreviewImageLength', ),
    0x1039: ('CCDScanMode',
             {0: 'Interlaced',
              1: 'Progressive'}),
    0x103a: ('NoiseReduction',
             {0: 'Off',
              1: 'On'}),
    0x103b: ('InfinityLensStep', ),
    0x103c: ('NearLensStep', ),
    # TODO - these need extra definitions
    # http://search.cpan.org/src/EXIFTOOL/Image-ExifTool-6.90/html/TagNames/Olympus.html
    0x2010: ('Equipment', ),
    0x2020: ('CameraSettings', ),
    0x2030: ('RawDevelopment', ),
    0x2040: ('ImageProcessing', ),
    0x2050: ('FocusInfo', ),
    0x3000: ('RawInfo ', ),
    }
# 0x2020 CameraSettings
# Sub-IFD tag table for the Olympus CameraSettings makernote section;
# same (name, optional lookup) layout as the other MAKERNOTE_* tables.
MAKERNOTE_OLYMPUS_TAG_0x2020={
    0x0100: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x0101: ('PreviewImageStart', ),
    0x0102: ('PreviewImageLength', ),
    0x0200: ('ExposureMode',
             {1: 'Manual',
              2: 'Program',
              3: 'Aperture-priority AE',
              4: 'Shutter speed priority AE',
              5: 'Program-shift'}),
    0x0201: ('AELock',
             {0: 'Off',
              1: 'On'}),
    0x0202: ('MeteringMode',
             {2: 'Center Weighted',
              3: 'Spot',
              5: 'ESP',
              261: 'Pattern+AF',
              515: 'Spot+Highlight control',
              1027: 'Spot+Shadow control'}),
    0x0300: ('MacroMode',
             {0: 'Off',
              1: 'On'}),
    0x0301: ('FocusMode',
             {0: 'Single AF',
              1: 'Sequential shooting AF',
              2: 'Continuous AF',
              3: 'Multi AF',
              10: 'MF'}),
    0x0302: ('FocusProcess',
             {0: 'AF Not Used',
              1: 'AF Used'}),
    0x0303: ('AFSearch',
             {0: 'Not Ready',
              1: 'Ready'}),
    0x0304: ('AFAreas', ),
    0x0401: ('FlashExposureCompensation', ),
    0x0500: ('WhiteBalance2',
             {0: 'Auto',
              16: '7500K (Fine Weather with Shade)',
              17: '6000K (Cloudy)',
              18: '5300K (Fine Weather)',
              20: '3000K (Tungsten light)',
              21: '3600K (Tungsten light-like)',
              33: '6600K (Daylight fluorescent)',
              34: '4500K (Neutral white fluorescent)',
              35: '4000K (Cool white fluorescent)',
              48: '3600K (Tungsten light-like)',
              256: 'Custom WB 1',
              257: 'Custom WB 2',
              258: 'Custom WB 3',
              259: 'Custom WB 4',
              512: 'Custom WB 5400K',
              513: 'Custom WB 2900K',
              514: 'Custom WB 8000K', }),
    0x0501: ('WhiteBalanceTemperature', ),
    0x0502: ('WhiteBalanceBracket', ),
    0x0503: ('CustomSaturation', ), # (3 numbers: 1. CS Value, 2. Min, 3. Max)
    0x0504: ('ModifiedSaturation',
             {0: 'Off',
              1: 'CM1 (Red Enhance)',
              2: 'CM2 (Green Enhance)',
              3: 'CM3 (Blue Enhance)',
              4: 'CM4 (Skin Tones)'}),
    0x0505: ('ContrastSetting', ), # (3 numbers: 1. Contrast, 2. Min, 3. Max)
    0x0506: ('SharpnessSetting', ), # (3 numbers: 1. Sharpness, 2. Min, 3. Max)
    0x0507: ('ColorSpace',
             {0: 'sRGB',
              1: 'Adobe RGB',
              2: 'Pro Photo RGB'}),
    0x0509: ('SceneMode',
             {0: 'Standard',
              6: 'Auto',
              7: 'Sport',
              8: 'Portrait',
              9: 'Landscape+Portrait',
              10: 'Landscape',
              11: 'Night scene',
              13: 'Panorama',
              16: 'Landscape+Portrait',
              17: 'Night+Portrait',
              19: 'Fireworks',
              20: 'Sunset',
              22: 'Macro',
              25: 'Documents',
              26: 'Museum',
              28: 'Beach&Snow',
              30: 'Candle',
              35: 'Underwater Wide1',
              36: 'Underwater Macro',
              39: 'High Key',
              40: 'Digital Image Stabilization',
              44: 'Underwater Wide2',
              45: 'Low Key',
              46: 'Children',
              48: 'Nature Macro'}),
    0x050a: ('NoiseReduction',
             {0: 'Off',
              1: 'Noise Reduction',
              2: 'Noise Filter',
              3: 'Noise Reduction + Noise Filter',
              4: 'Noise Filter (ISO Boost)',
              5: 'Noise Reduction + Noise Filter (ISO Boost)'}),
    0x050b: ('DistortionCorrection',
             {0: 'Off',
              1: 'On'}),
    0x050c: ('ShadingCompensation',
             {0: 'Off',
              1: 'On'}),
    0x050d: ('CompressionFactor', ),
    0x050f: ('Gradation',
             {'-1 -1 1': 'Low Key',
              '0 -1 1': 'Normal',
              '1 -1 1': 'High Key'}),
    0x0520: ('PictureMode',
             {1: 'Vivid',
              2: 'Natural',
              3: 'Muted',
              256: 'Monotone',
              512: 'Sepia'}),
    0x0521: ('PictureModeSaturation', ),
    0x0522: ('PictureModeHue?', ),
    0x0523: ('PictureModeContrast', ),
    0x0524: ('PictureModeSharpness', ),
    0x0525: ('PictureModeBWFilter',
             {0: 'n/a',
              1: 'Neutral',
              2: 'Yellow',
              3: 'Orange',
              4: 'Red',
              5: 'Green'}),
    0x0526: ('PictureModeTone',
             {0: 'n/a',
              1: 'Neutral',
              2: 'Sepia',
              3: 'Blue',
              4: 'Purple',
              5: 'Green'}),
    0x0600: ('Sequence', ), # 2 or 3 numbers: 1. Mode, 2. Shot number, 3. Mode bits
    0x0601: ('PanoramaMode', ), # (2 numbers: 1. Mode, 2. Shot number)
    0x0603: ('ImageQuality2',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ',
              4: 'RAW'}),
    0x0901: ('ManometerReading', ),
    }
# Casio makernote tags: tag ID -> (name, optional value-to-text lookup).
MAKERNOTE_CASIO_TAGS={
    0x0001: ('RecordingMode',
             {1: 'Single Shutter',
              2: 'Panorama',
              3: 'Night Scene',
              4: 'Portrait',
              5: 'Landscape'}),
    0x0002: ('Quality',
             {1: 'Economy',
              2: 'Normal',
              3: 'Fine'}),
    0x0003: ('FocusingMode',
             {2: 'Macro',
              3: 'Auto Focus',
              4: 'Manual Focus',
              5: 'Infinity'}),
    0x0004: ('FlashMode',
             {1: 'Auto',
              2: 'On',
              3: 'Off',
              4: 'Red Eye Reduction'}),
    0x0005: ('FlashIntensity',
             {11: 'Weak',
              13: 'Normal',
              15: 'Strong'}),
    0x0006: ('Object Distance', ),
    0x0007: ('WhiteBalance',
             {1: 'Auto',
              2: 'Tungsten',
              3: 'Daylight',
              4: 'Fluorescent',
              5: 'Shade',
              129: 'Manual'}),
    0x000B: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0x000C: ('Contrast',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x000D: ('Saturation',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x0014: ('CCDSpeed',
             {64: 'Normal',
              80: 'Normal',
              100: 'High',
              125: '+1.0',
              244: '+3.0',
              250: '+2.0'}),
    }
# Fujifilm makernote tags: tag ID -> (name, optional decoder).  Note that
# decode_maker_note() reads this section in Intel byte order regardless of
# the byte order of the rest of the file.
MAKERNOTE_FUJIFILM_TAGS={
    0x0000: ('NoteVersion', make_string),
    0x1000: ('Quality', ),
    0x1001: ('Sharpness',
             {1: 'Soft',
              2: 'Soft',
              3: 'Normal',
              4: 'Hard',
              5: 'Hard'}),
    0x1002: ('WhiteBalance',
             {0: 'Auto',
              256: 'Daylight',
              512: 'Cloudy',
              768: 'DaylightColor-Fluorescent',
              769: 'DaywhiteColor-Fluorescent',
              770: 'White-Fluorescent',
              1024: 'Incandescent',
              3840: 'Custom'}),
    0x1003: ('Color',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1004: ('Tone',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1010: ('FlashMode',
             {0: 'Auto',
              1: 'On',
              2: 'Off',
              3: 'Red Eye Reduction'}),
    0x1011: ('FlashStrength', ),
    0x1020: ('Macro',
             {0: 'Off',
              1: 'On'}),
    0x1021: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1030: ('SlowSync',
             {0: 'Off',
              1: 'On'}),
    0x1031: ('PictureMode',
             {0: 'Auto',
              1: 'Portrait',
              2: 'Landscape',
              4: 'Sports',
              5: 'Night',
              6: 'Program AE',
              256: 'Aperture Priority AE',
              512: 'Shutter Priority AE',
              768: 'Manual Exposure'}),
    0x1100: ('MotorOrBracket',
             {0: 'Off',
              1: 'On'}),
    0x1300: ('BlurWarning',
             {0: 'Off',
              1: 'On'}),
    0x1301: ('FocusWarning',
             {0: 'Off',
              1: 'On'}),
    0x1302: ('AEWarning',
             {0: 'Off',
              1: 'On'}),
    }
# Canon makernote tags that hold directly printable values (no lookup table).
# The structured tags 0x0001 and 0x0004 are decoded separately via
# canon_decode_tag() using the MAKERNOTE_CANON_TAG_0x00* tables below.
MAKERNOTE_CANON_TAGS = {
    0x0006: ('ImageType', ),
    0x0007: ('FirmwareVersion', ),
    0x0008: ('ImageNumber', ),
    0x0009: ('OwnerName', ),
    }
# this is in element offset, name, optional value dictionary format
# (the Canon 0x0001 tag is an array; each index below names one element)
MAKERNOTE_CANON_TAG_0x001 = {
    1: ('Macromode',
        {1: 'Macro',
         2: 'Normal'}),
    2: ('SelfTimer', ),
    3: ('Quality',
        {2: 'Normal',
         3: 'Fine',
         5: 'Superfine'}),
    4: ('FlashMode',
        {0: 'Flash Not Fired',
         1: 'Auto',
         2: 'On',
         3: 'Red-Eye Reduction',
         4: 'Slow Synchro',
         5: 'Auto + Red-Eye Reduction',
         6: 'On + Red-Eye Reduction',
         16: 'external flash'}),
    5: ('ContinuousDriveMode',
        {0: 'Single Or Timer',
         1: 'Continuous'}),
    7: ('FocusMode',
        {0: 'One-Shot',
         1: 'AI Servo',
         2: 'AI Focus',
         3: 'MF',
         4: 'Single',
         5: 'Continuous',
         6: 'MF'}),
    10: ('ImageSize',
         {0: 'Large',
          1: 'Medium',
          2: 'Small'}),
    11: ('EasyShootingMode',
         {0: 'Full Auto',
          1: 'Manual',
          2: 'Landscape',
          3: 'Fast Shutter',
          4: 'Slow Shutter',
          5: 'Night',
          6: 'B&W',
          7: 'Sepia',
          8: 'Portrait',
          9: 'Sports',
          10: 'Macro/Close-Up',
          11: 'Pan Focus'}),
    12: ('DigitalZoom',
         {0: 'None',
          1: '2x',
          2: '4x'}),
    13: ('Contrast',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    14: ('Saturation',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    15: ('Sharpness',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    16: ('ISO',
         {0: 'See ISOSpeedRatings Tag',
          15: 'Auto',
          16: '50',
          17: '100',
          18: '200',
          19: '400'}),
    17: ('MeteringMode',
         {3: 'Evaluative',
          4: 'Partial',
          5: 'Center-weighted'}),
    18: ('FocusType',
         {0: 'Manual',
          1: 'Auto',
          3: 'Close-Up (Macro)',
          8: 'Locked (Pan Mode)'}),
    19: ('AFPointSelected',
         {0x3000: 'None (MF)',
          0x3001: 'Auto-Selected',
          0x3002: 'Right',
          0x3003: 'Center',
          0x3004: 'Left'}),
    20: ('ExposureMode',
         {0: 'Easy Shooting',
          1: 'Program',
          2: 'Tv-priority',
          3: 'Av-priority',
          4: 'Manual',
          5: 'A-DEP'}),
    23: ('LongFocalLengthOfLensInFocalUnits', ),
    24: ('ShortFocalLengthOfLensInFocalUnits', ),
    25: ('FocalUnitsPerMM', ),
    28: ('FlashActivity',
         {0: 'Did Not Fire',
          1: 'Fired'}),
    29: ('FlashDetails',
         {14: 'External E-TTL',
          13: 'Internal Flash',
          11: 'FP Sync Used',
          7: '2nd("Rear")-Curtain Sync Used',
          4: 'FP Sync Enabled'}),
    32: ('FocusMode',
         {0: 'Single',
          1: 'Continuous'}),
    }
# element offset -> (name, optional lookup) for the Canon 0x0004 array tag,
# decoded by canon_decode_tag()
MAKERNOTE_CANON_TAG_0x004 = {
    7: ('WhiteBalance',
        {0: 'Auto',
         1: 'Sunny',
         2: 'Cloudy',
         3: 'Tungsten',
         4: 'Fluorescent',
         5: 'Flash',
         6: 'Custom'}),
    9: ('SequenceNumber', ),
    14: ('AFPointUsed', ),
    15: ('FlashBias',
         {0xFFC0: '-2 EV',
          0xFFCC: '-1.67 EV',
          0xFFD0: '-1.50 EV',
          0xFFD4: '-1.33 EV',
          0xFFE0: '-1 EV',
          0xFFEC: '-0.67 EV',
          0xFFF0: '-0.50 EV',
          0xFFF4: '-0.33 EV',
          0x0000: '0 EV',
          0x000C: '0.33 EV',
          0x0010: '0.50 EV',
          0x0014: '0.67 EV',
          0x0020: '1 EV',
          0x002C: '1.33 EV',
          0x0030: '1.50 EV',
          0x0034: '1.67 EV',
          0x0040: '2 EV'}),
    19: ('SubjectDistance', ),
    }
# extract multibyte integer in Motorola format (big endian, most significant
# byte first) -- the previous comment had the endianness backwards
def s2n_motorola(str):
    x = 0
    for c in str:
        x = (x << 8) | ord(c)
    return x
# extract multibyte integer in Intel format (little endian, least significant
# byte first) -- the previous comment had the endianness backwards
def s2n_intel(str):
    """Convert a little-endian byte string to an unsigned integer."""
    x = 0
    # plain 0 instead of the old 0L: ints auto-promote to longs on Python 2,
    # and the 0L literal is a syntax error on Python 3
    y = 0
    for c in str:
        x = x | (ord(c) << y)
        y = y + 8
    return x
# ratio object that eventually will be able to reduce itself to lowest
# common denominator for printing
def gcd(a, b):
    """Greatest common divisor, computed with the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
class Ratio:
    """A num/den rational value that reduces itself to lowest terms for printing."""
    def __init__(self, num, den):
        self.num = num
        self.den = den
    def __repr__(self):
        # reduce first so the printed fraction is in lowest terms
        self.reduce()
        if self.den == 1:
            return str(self.num)
        return '%d/%d' % (self.num, self.den)
    def reduce(self):
        """Divide num and den by their GCD (in place)."""
        div = gcd(self.num, self.den)
        if div > 1:
            # floor division: identical to '/' on Python 2 ints, but keeps
            # num/den integral instead of becoming floats under Python 3
            self.num = self.num // div
            self.den = self.den // div
# for ease of dealing with tags
class IFD_Tag:
    """One decoded IFD entry: its printable form plus raw location info."""
    def __init__(self, printable, tag, field_type, values, field_offset,
                 field_length):
        self.printable = printable        # human-readable rendering of the data
        self.tag = tag                    # numeric tag ID
        self.field_type = field_type      # index into FIELD_TYPES
        self.field_offset = field_offset  # byte offset of the field from the IFD start
        self.field_length = field_length  # size of the data field in bytes
        self.values = values              # a string, or a list of decoded items
    def __str__(self):
        return self.printable
    def __repr__(self):
        return '(0x%04X) %s=%s @ %d' % (self.tag,
                                        FIELD_TYPES[self.field_type][2],
                                        self.printable,
                                        self.field_offset)
# class that handles an EXIF header
class EXIF_header:
    """Parser for the IFD structures of an EXIF header.

    Wraps an open file object whose EXIF data starts at 'offset'; decoded
    tags accumulate in self.tags keyed as '<IFD name> <tag name>'.
    """
    def __init__(self, file, endian, offset, fake_exif, strict, debug=0):
        # file: open file object; endian: 'I' (Intel) or 'M' (Motorola);
        # offset: file position where the EXIF data begins; fake_exif is set
        # when process_file() synthesized a fake EXIF prefix for the JPEG
        self.file = file
        self.endian = endian
        self.offset = offset
        self.fake_exif = fake_exif
        self.strict = strict
        self.debug = debug
        self.tags = {}
    # convert slice to integer, based on sign and endian flags
    # usually this offset is assumed to be relative to the beginning of the
    # start of the EXIF information. For some cameras that use relative tags,
    # this offset may be relative to some other starting point.
    def s2n(self, offset, length, signed=0):
        """Read 'length' bytes at 'offset' (relative to self.offset) as an int."""
        self.file.seek(self.offset+offset)
        slice=self.file.read(length)
        if self.endian == 'I':
            val=s2n_intel(slice)
        else:
            val=s2n_motorola(slice)
        # Sign extension ?
        if signed:
            msb=1L << (8*length-1)
            if val & msb:
                val=val-(msb << 1)
        return val
    # convert offset to string
    def n2s(self, offset, length):
        """Encode integer 'offset' into a 'length'-byte string in self.endian order."""
        s = ''
        for dummy in range(length):
            if self.endian == 'I':
                s = s + chr(offset & 0xFF)
            else:
                s = chr(offset & 0xFF) + s
            offset = offset >> 8
        return s
    # return first IFD
    def first_IFD(self):
        """Return the offset of the first IFD, read from the TIFF header."""
        return self.s2n(4, 4)
    # return pointer to next IFD
    def next_IFD(self, ifd):
        """Return the offset of the IFD following the one at 'ifd' (0 when none)."""
        entries=self.s2n(ifd, 2)
        return self.s2n(ifd+2+12*entries, 4)
    # return list of IFDs in header
    def list_IFDs(self):
        """Return the offsets of all IFDs by walking the next-IFD chain."""
        i=self.first_IFD()
        a=[]
        while i:
            a.append(i)
            i=self.next_IFD(i)
        return a
    # return list of entries in this IFD
    def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS, relative=0, stop_tags=()):
        """Decode every entry of the IFD at 'ifd' into self.tags.

        dict maps tag IDs to (name, optional decoder); relative=1 switches
        to MakerNote-relative offsets (Nikon type 3); processing stops once
        every name in stop_tags has been seen.
        """
        entries=self.s2n(ifd, 2)
        stop_tags_length = len(stop_tags)
        stop_tags_encountered = 0
        for i in range(entries):
            # entry is index of start of this IFD in the file
            entry = ifd + 2 + 12 * i
            tag = self.s2n(entry, 2)
            # get tag name early to avoid errors, help debug
            tag_entry = dict.get(tag)
            if tag_entry:
                tag_name = tag_entry[0]
            else:
                tag_name = 'Tag 0x%04X' % tag
            # ignore certain tags for faster processing
            if not (not detailed and tag in IGNORE_TAGS):
                field_type = self.s2n(entry + 2, 2)
                # unknown field type
                if not 0 < field_type < len(FIELD_TYPES):
                    if not self.strict:
                        continue
                    else:
                        raise ValueError('unknown type %d in tag 0x%04X' % (field_type, tag))
                typelen = FIELD_TYPES[field_type][0]
                count = self.s2n(entry + 4, 4)
                # Adjust for tag id/type/count (2+2+4 bytes)
                # Now we point at either the data or the 2nd level offset
                offset = entry + 8
                # If the value fits in 4 bytes, it is inlined, else we
                # need to jump ahead again.
                if count * typelen > 4:
                    # offset is not the value; it's a pointer to the value
                    # if relative we set things up so s2n will seek to the right
                    # place when it adds self.offset. Note that this 'relative'
                    # is for the Nikon type 3 makernote. Other cameras may use
                    # other relative offsets, which would have to be computed here
                    # slightly differently.
                    if relative:
                        tmp_offset = self.s2n(offset, 4)
                        offset = tmp_offset + ifd - 8
                        if self.fake_exif:
                            offset = offset + 18
                    else:
                        offset = self.s2n(offset, 4)
                field_offset = offset
                if field_type == 2:
                    # special case: null-terminated ASCII string
                    # XXX investigate
                    # sometimes gets too big to fit in int value
                    if count != 0 and count < (2**31):
                        self.file.seek(self.offset + offset)
                        values = self.file.read(count)
                        #print values
                        # Drop any garbage after a null.
                        values = values.split('\x00', 1)[0]
                    else:
                        values = ''
                else:
                    values = []
                    signed = (field_type in [6, 8, 9, 10])
                    # XXX investigate
                    # some entries get too big to handle could be malformed
                    # file or problem with self.s2n
                    if count < 1000:
                        for dummy in range(count):
                            if field_type in (5, 10):
                                # a ratio
                                value = Ratio(self.s2n(offset, 4, signed),
                                              self.s2n(offset + 4, 4, signed))
                            else:
                                value = self.s2n(offset, typelen, signed)
                            values.append(value)
                            offset = offset + typelen
                    # The test above causes problems with tags that are
                    # supposed to have long values! Fix up one important case.
                    elif tag_name == 'MakerNote' :
                        for dummy in range(count):
                            value = self.s2n(offset, typelen, signed)
                            values.append(value)
                            offset = offset + typelen
                    #else :
                    #    print "Warning: dropping large tag:", tag, tag_name
                # now 'values' is either a string or an array
                if count == 1 and field_type != 2:
                    printable=str(values[0])
                elif count > 50 and len(values) > 20 :
                    printable=str( values[0:20] )[0:-1] + ", ... ]"
                else:
                    printable=str(values)
                # compute printable version of values
                if tag_entry:
                    if len(tag_entry) != 1:
                        # optional 2nd tag element is present
                        if callable(tag_entry[1]):
                            # call mapping function
                            printable = tag_entry[1](values)
                        else:
                            printable = ''
                            for i in values:
                                # use lookup table for this tag
                                printable += tag_entry[1].get(i, repr(i))
                self.tags[ifd_name + ' ' + tag_name] = IFD_Tag(printable, tag,
                                                               field_type,
                                                               values, field_offset,
                                                               count * typelen)
                if self.debug:
                    print ' debug: %s: %s' % (tag_name,
                                              repr(self.tags[ifd_name + ' ' + tag_name]))
            if tag_name in stop_tags:
                stop_tags_encountered += 1
                if stop_tags_encountered >= stop_tags_length:
                    break
    # extract uncompressed TIFF thumbnail (like pulling teeth)
    # we take advantage of the pre-existing layout in the thumbnail IFD as
    # much as possible
    def extract_TIFF_thumbnail(self, thumb_ifd):
        """Rebuild the uncompressed TIFF thumbnail into self.tags['TIFFThumbnail']."""
        entries = self.s2n(thumb_ifd, 2)
        # this is header plus offset to IFD ...
        if self.endian == 'M':
            tiff = 'MM\x00*\x00\x00\x00\x08'
        else:
            tiff = 'II*\x00\x08\x00\x00\x00'
        # ... plus thumbnail IFD data plus a null "next IFD" pointer
        self.file.seek(self.offset+thumb_ifd)
        tiff += self.file.read(entries*12+2)+'\x00\x00\x00\x00'
        # fix up large value offset pointers into data area
        for i in range(entries):
            entry = thumb_ifd + 2 + 12 * i
            tag = self.s2n(entry, 2)
            field_type = self.s2n(entry+2, 2)
            typelen = FIELD_TYPES[field_type][0]
            count = self.s2n(entry+4, 4)
            oldoff = self.s2n(entry+8, 4)
            # start of the 4-byte pointer area in entry
            ptr = i * 12 + 18
            # remember strip offsets location
            if tag == 0x0111:
                strip_off = ptr
                strip_len = count * typelen
            # is it in the data area?
            if count * typelen > 4:
                # update offset pointer (nasty "strings are immutable" crap)
                # should be able to say "tiff[ptr:ptr+4]=newoff"
                newoff = len(tiff)
                tiff = tiff[:ptr] + self.n2s(newoff, 4) + tiff[ptr+4:]
                # remember strip offsets location
                if tag == 0x0111:
                    strip_off = newoff
                    strip_len = 4
                # get original data and store it
                self.file.seek(self.offset + oldoff)
                tiff += self.file.read(count * typelen)
        # add pixel strips and update strip offset info
        old_offsets = self.tags['Thumbnail StripOffsets'].values
        old_counts = self.tags['Thumbnail StripByteCounts'].values
        for i in range(len(old_offsets)):
            # update offset pointer (more nasty "strings are immutable" crap)
            offset = self.n2s(len(tiff), strip_len)
            tiff = tiff[:strip_off] + offset + tiff[strip_off + strip_len:]
            strip_off += strip_len
            # add pixel strip to end
            self.file.seek(self.offset + old_offsets[i])
            tiff += self.file.read(old_counts[i])
        self.tags['TIFFThumbnail'] = tiff
    # decode all the camera-specific MakerNote formats
    # Note is the data that comprises this MakerNote. The MakerNote will
    # likely have pointers in it that point to other parts of the file. We'll
    # use self.offset as the starting point for most of those pointers, since
    # they are relative to the beginning of the file.
    #
    # If the MakerNote is in a newer format, it may use relative addressing
    # within the MakerNote. In that case we'll use relative addresses for the
    # pointers.
    #
    # As an aside: it's not just to be annoying that the manufacturers use
    # relative offsets. It's so that if the makernote has to be moved by the
    # picture software all of the offsets don't have to be adjusted. Overall,
    # this is probably the right strategy for makernotes, though the spec is
    # ambiguous. (The spec does not appear to imagine that makernotes would
    # follow EXIF format internally. Once they did, it's ambiguous whether
    # the offsets should be from the header at the start of all the EXIF info,
    # or from the header at the start of the makernote.)
    def decode_maker_note(self):
        """Dispatch MakerNote decoding based on the 'Image Make' tag."""
        note = self.tags['EXIF MakerNote']
        # Some apps use MakerNote tags but do not use a format for which we
        # have a description, so just do a raw dump for these.
        #if self.tags.has_key('Image Make'):
        make = self.tags['Image Make'].printable
        #else:
        #    make = ''
        # model = self.tags['Image Model'].printable # unused
        # Nikon
        # The maker note usually starts with the word Nikon, followed by the
        # type of the makernote (1 or 2, as a short). If the word Nikon is
        # not at the start of the makernote, it's probably type 2, since some
        # cameras work that way.
        if 'NIKON' in make:
            if note.values[0:7] == [78, 105, 107, 111, 110, 0, 1]:
                if self.debug:
                    print "Looks like a type 1 Nikon MakerNote."
                self.dump_IFD(note.field_offset+8, 'MakerNote',
                              dict=MAKERNOTE_NIKON_OLDER_TAGS)
            elif note.values[0:7] == [78, 105, 107, 111, 110, 0, 2]:
                if self.debug:
                    print "Looks like a labeled type 2 Nikon MakerNote"
                if note.values[12:14] != [0, 42] and note.values[12:14] != [42L, 0L]:
                    raise ValueError("Missing marker tag '42' in MakerNote.")
                # skip the Makernote label and the TIFF header
                self.dump_IFD(note.field_offset+10+8, 'MakerNote',
                              dict=MAKERNOTE_NIKON_NEWER_TAGS, relative=1)
            else:
                # E99x or D1
                if self.debug:
                    print "Looks like an unlabeled type 2 Nikon MakerNote"
                self.dump_IFD(note.field_offset, 'MakerNote',
                              dict=MAKERNOTE_NIKON_NEWER_TAGS)
            return
        # Olympus
        if make.startswith('OLYMPUS'):
            self.dump_IFD(note.field_offset+8, 'MakerNote',
                          dict=MAKERNOTE_OLYMPUS_TAGS)
            # XXX TODO
            #for i in (('MakerNote Tag 0x2020', MAKERNOTE_OLYMPUS_TAG_0x2020),):
            #    self.decode_olympus_tag(self.tags[i[0]].values, i[1])
            #return
        # Casio
        if 'CASIO' in make or 'Casio' in make:
            self.dump_IFD(note.field_offset, 'MakerNote',
                          dict=MAKERNOTE_CASIO_TAGS)
            return
        # Fujifilm
        if make == 'FUJIFILM':
            # bug: everything else is "Motorola" endian, but the MakerNote
            # is "Intel" endian
            endian = self.endian
            self.endian = 'I'
            # bug: IFD offsets are from beginning of MakerNote, not
            # beginning of file header
            offset = self.offset
            self.offset += note.field_offset
            # process note with bogus values (note is actually at offset 12)
            self.dump_IFD(12, 'MakerNote', dict=MAKERNOTE_FUJIFILM_TAGS)
            # reset to correct values
            self.endian = endian
            self.offset = offset
            return
        # Canon
        if make == 'Canon':
            self.dump_IFD(note.field_offset, 'MakerNote',
                          dict=MAKERNOTE_CANON_TAGS)
            for i in (('MakerNote Tag 0x0001', MAKERNOTE_CANON_TAG_0x001),
                      ('MakerNote Tag 0x0004', MAKERNOTE_CANON_TAG_0x004)):
                self.canon_decode_tag(self.tags[i[0]].values, i[1])
            return
    # XXX TODO decode Olympus MakerNote tag based on offset within tag
    def olympus_decode_tag(self, value, dict):
        # not implemented yet -- placeholder so decode_maker_note's commented
        # Olympus sub-tag loop has a target
        pass
    # decode Canon MakerNote tag based on offset within tag
    # see http://www.burren.cx/david/canon.html by David Burren
    def canon_decode_tag(self, value, dict):
        """Decode one Canon array-valued tag; 'dict' names each array element."""
        for i in range(1, len(value)):
            x=dict.get(i, ('Unknown', ))
            if self.debug:
                print i, x
            name=x[0]
            if len(x) > 1:
                val=x[1].get(value[i], 'Unknown')
            else:
                val=value[i]
            # it's not a real IFD Tag but we fake one to make everybody
            # happy. this will have a "proprietary" type
            self.tags['MakerNote '+name]=IFD_Tag(str(val), None, 0, None,
                                                 None, None)
# process an image file (expects an open file object)
# this is the function that has to deal with all the arbitrary nasty bits
# of the EXIF standard
def process_file(f, stop_tags=(), details=True, strict=False, debug=False):
# yah it's cheesy...
global detailed
detailed = details
# by default do not fake an EXIF beginning
fake_exif = 0
# determine whether it's a JPEG or TIFF
data = f.read(12)
if data[0:4] in ['II*\x00', 'MM\x00*']:
# it's a TIFF file
f.seek(0)
endian = f.read(1)
f.read(1)
offset = 0
elif data[0:2] == '\xFF\xD8':
# it's a JPEG file
while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM', 'Phot'):
length = ord(data[4])*256+ord(data[5])
f.read(length-8)
# fake an EXIF beginning of file
data = '\xFF\x00'+f.read(10)
fake_exif = 1
if data[2] == '\xFF' and data[6:10] == 'Exif':
# detected EXIF header
offset = f.tell()
endian = f.read(1)
else:
# no EXIF information
return {}
else:
# file format not recognized
return {}
# deal with the EXIF info we found
if debug:
print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format'
hdr = EXIF_header(f, endian, offset, fake_exif, strict, debug)
ifd_list = hdr.list_IFDs()
ctr = 0
for i in ifd_list:
if ctr == 0:
IFD_name = 'Image'
elif ctr == 1:
IFD_name = 'Thumbnail'
thumb_ifd = i
else:
IFD_name = 'IFD %d' % ctr
if debug:
print ' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i)
hdr.dump_IFD(i, IFD_name, stop_tags=stop_tags)
# EXIF IFD
exif_off = hdr.tags.get(IFD_name+' ExifOffset')
if exif_off:
if debug:
print ' EXIF SubIFD at offset %d:' % exif_off.values[0]
hdr.dump_IFD(exif_off.values[0], 'EXIF', stop_tags=stop_tags)
# Interoperability IFD contained in EXIF IFD
intr_off = hdr.tags.get('EXIF SubIFD InteroperabilityOffset')
if intr_off:
if debug:
print ' EXIF Interoperability SubSubIFD at offset %d:' \
% intr_off.values[0]
hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability',
dict=INTR_TAGS, stop_tags=stop_tags)
# GPS IFD
gps_off = hdr.tags.get(IFD_name+' GPSInfo')
if gps_off:
if debug:
print ' GPS SubIFD at offset %d:' % gps_off.values[0]
hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS, stop_tags=stop_tags)
ctr += 1
# extract uncompressed TIFF thumbnail
thumb = hdr.tags.get('Thumbnail Compression')
if thumb and thumb.printable == 'Uncompressed TIFF':
hdr.extract_TIFF_thumbnail(thumb_ifd)
# JPEG thumbnail (thankfully the JPEG data is stored as a unit)
thumb_off = hdr.tags.get('Thumbnail JPEGInterchangeFormat')
if thumb_off:
f.seek(offset+thumb_off.values[0])
size = hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0]
hdr.tags['JPEGThumbnail'] = f.read(size)
# deal with MakerNote contained in EXIF IFD
# (Some apps use MakerNote tags but do not use a format for which we
# have a description, do not process these).
if 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags and detailed:
hdr.decode_maker_note()
# Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote
# since it's not allowed in a uncompressed TIFF IFD
if 'JPEGThumbnail' not in hdr.tags:
thumb_off=hdr.tags.get('MakerNote JPEGThumbnail')
if thumb_off:
f.seek(offset+thumb_off.values[0])
hdr.tags['JPEGThumbnail']=file.read(thumb_off.field_length)
return hdr.tags
# show command line usage
def usage(exit_status):
msg = 'Usage: EXIF.py [OPTIONS] file1 [file2 ...]\n'
msg += 'Extract EXIF information from digital camera image files.\n\nOptions:\n'
msg += '-q --quick Do not process MakerNotes.\n'
msg += '-t TAG --stop-tag TAG Stop processing when this tag is retrieved.\n'
msg += '-s --strict Run in strict mode (stop on errors).\n'
msg += '-d --debug Run in debug mode (display extra info).\n'
print msg
sys.exit(exit_status)
# library test/debug function (dump given files)
if __name__ == '__main__':
    import sys
    import getopt
    # parse command line options/arguments
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hqsdt:v", ["help", "quick", "strict", "debug", "stop-tag="])
    except getopt.GetoptError:
        usage(2)
    if args == []:
        usage(2)
    # defaults: process MakerNotes, no stop tag, lenient, quiet
    detailed = True
    stop_tags = ()
    debug = False
    strict = False
    for o, a in opts:
        if o in ("-h", "--help"):
            usage(0)
        if o in ("-q", "--quick"):
            detailed = False
        if o in ("-t", "--stop-tag"):
            stop_tags = (a,)
        if o in ("-s", "--strict"):
            strict = True
        if o in ("-d", "--debug"):
            debug = True
    # output info for each file
    for filename in args:
        try:
            # NOTE(review): 'file' shadows the Python 2 builtin of the same name
            file=open(filename, 'rb')
        except:
            print "'%s' is unreadable\n"%filename
            continue
        print filename + ':'
        # get the tags
        data = process_file(file, stop_tags=stop_tags, details=detailed, strict=strict, debug=debug)
        if not data:
            print 'No EXIF information found'
            continue
        # print tags sorted by name, skipping the raw thumbnail blobs
        x=data.keys()
        x.sort()
        for i in x:
            if i in ('JPEGThumbnail', 'TIFFThumbnail'):
                continue
            try:
                print ' %s (%s): %s' % \
                      (i, FIELD_TYPES[data[i].field_type][2], data[i].printable)
            except:
                print 'error', i, '"', data[i], '"'
        if 'JPEGThumbnail' in data:
            print 'File has JPEG thumbnail'
        print
| Python |
#!/usr/bin/python
import dbhash,anydbm
import sys, os, shelve, logging,string
from ConfigParser import *
import flickr
# module-level state shared by creatSet()/createSets(); filled in by
# createSets() after a successful flickr login
existingSets = None
user = None
configdict = ConfigParser()
configdict.read('uploadr.ini')
onlySubs = configdict.defaults()['only_sub_sets'] #set to true if Sets should be called only by the name of the last subfolder
def creatSet(photoSet, setName):
    """Put the photos in photoSet into the Flickr set named setName.

    photoSet is a list of Flickr photo ids.  Reuses an existing set with
    that name if one is found in the module-level existingSets, otherwise
    creates a new one.  Returns the flickr.Photoset (or None when both
    lookup and creation failed).
    """
    # path separators are not meaningful in a set name
    setName = setName.replace('\\', ' ')
    setName = setName.replace('/', ' ')
    # str.strip() instead of the long-deprecated string.strip() module function
    setName = setName.strip()
    photos = []  # real photo objects
    for p in photoSet:
        photos.append(flickr.Photo(id=p))
    fset = None
    # check if a set with the name exists already
    for s in existingSets:
        if s.title == setName:
            fset = s
            logging.debug('tags2set: Found existing set %s' % setName)
            break
    try:
        if fset is None:
            fset = flickr.Photoset.create(photos[0], setName, 'auto generated by folders2flickr')
            logging.debug('tags2set: Creating new set %s' % setName)
    # 'except Exception' (not bare) so Ctrl-C / SystemExit still propagate
    except Exception:
        logging.error('tags2set: Cannot create set %s' % setName)
        logging.error(sys.exc_info()[0])
    try:
        fset.editPhotos(photos)
    except Exception:
        logging.error('tags2set: Cannot edit set %s' % setName)
        logging.error(sys.exc_info()[0])
    logging.debug('tags2set: ...added %d photos' % len(photos))
    return fset
def createSets(historyFile):
    """Group uploaded images from the history shelve into Flickr sets.

    Walks the upload-history shelve in key order; image-path keys (which
    start with a path separator) are grouped by directory, and each group
    is handed to creatSet().  Returns None on login failure.
    """
    global existingSets
    global user
    logging.debug('tags2set: Started tags2set')
    try:
        user = flickr.test_login()
        logging.debug(user.id)
        existingSets = user.getPhotosets()
    except Exception:
        logging.error(sys.exc_info()[0])
        return None
    uploaded = shelve.open(historyFile)
    # sorted() keeps images of the same directory adjacent
    keys = sorted(uploaded.keys())
    lastSetName = ''
    photoSet = []
    setName = ''
    for image in keys:
        # filter out photoid keys: only image-path keys start with a separator
        if image.startswith('\\') or image.startswith('/'):
            if onlySubs.startswith('true'):
                # name the set after the last subfolder only
                head, setName = os.path.split(os.path.dirname(image))
            else:
                setName = os.path.dirname(image)  # set name is really a directory
            if not lastSetName == setName and not lastSetName == '':
                # a new set is starting, so save the previous one
                creatSet(photoSet, lastSetName)
                photoSet = []
            logging.debug("tags2set: Adding image %s" % image)
            photoSet.append(uploaded.get(image))
            lastSetName = setName
    # close the shelve now that all values have been read
    uploaded.close()
    # don't forget to create the last set -- but only if anything was collected
    # (the original unconditionally called creatSet and logged a bogus error
    # when the history contained no image keys)
    if photoSet:
        creatSet(photoSet, setName)
| Python |
#!/usr/bin/python
import dbhash,anydbm
import sys, os, shelve, logging,string
from ConfigParser import *
import flickr
# module-level state; 'user' is filled in by deleteAllPics() after login
existingSets = None
user = None
configdict = ConfigParser()
configdict.read('uploadr.ini')
deleteAll = configdict.defaults()['remove_all_pics_first'] #set to 'true' to remove every photo from the Flickr account before uploading
def deleteAllPics( ):
global user
try:
user = flickr.test_login()
logging.debug(user.id)
except:
logging.error(sys.exc_info()[0])
return None
if(deleteAll.startswith('true') == False):
return #check again to be sure if to go one
logging.debug('deleteAll: Started Delete')
retries = 0
#this may take very long time !!!!
while (retries < 3):
try:
photos = []
logging.debug(user.id)
np = flickr.photos_search_pages(user_id=user.id, auth=all, per_page="500")
numPages = int(np)
i = 1
logging.debug("found %d num pages" % numPages)
while ( numPages > 0):
spage = str(i)
photos.extend(flickr.photos_search(user_id=user.id, auth=all, per_page="500", page=spage))
logging.debug( "added %d page to %d pic" % (i, len(photos)))
numPages = numPages - 1
i = i + 1
logging.debug( "got all %d pics to delete" % len(photos))
break
except:
logging.error("deleteAll: Flickr error while searching ....retrying")
logging.error(sys.exc_info()[0])
retries = retries + 1
if (not photos or len(photos) == 0):
logging.debug("deleteAll: No files in Flickr to delete" )
return None
logging.debug("deleteAll: found %d media files to delete" % (len(photos)))
while (len(photos)>1):
try:
photos.pop().delete()
print "deleting pic "
logging.debug("deleteAll: Removed one image... %d images to go" % (len(photos)))
except:
logging.error("deleteAll: Flickr error while deleting image")
logging.error(sys.exc_info()[0])
logging.debug("deleteAll: DONE DELETING - NOTHING ELSE TO DO - EXITING")
os._exit(1)
| Python |
#!/usr/bin/env python
import dbhash,anydbm
import sys, time, os, urllib2, shelve, string, logging, flickr, re
import xmltramp, mimetools, mimetypes, md5, webbrowser, exif, flickr2history, tags2set, deleteAll
from ConfigParser import *
import threading, Queue
#
# uploadr.py
#
# Upload images placed within a directory to your Flickr account.
#
# Requires:
# xmltramp http://www.aaronsw.com/2002/xmltramp/
# flickr account http://flickr.com
#
# Inspired by:
# http://micampe.it/things/flickruploadr
#
#
# September 2005
# Cameron Mallory cmallory/berserk.org
#
# This code has been updated to use the new Auth API from flickr.
#
# You may use this code however you see fit in any form whatsoever.
#
# 2009 Peter Kolarov - Updated with fixes and new functionality
#
#
configdict = ConfigParser()
configdict.read('uploadr.ini')
#
# Location to scan for new images
#
IMAGE_DIR = configdict.defaults()['imagedir']
#
# Flickr settings; the visibility flags (is_public/is_friend/is_family)
# come straight from uploadr.ini
#
FLICKR = {"title": "",
          "description": "",
          "tags": "auto-upload",
          "is_public": configdict.defaults()['public'],
          "is_friend": configdict.defaults()['friend'],
          "is_family": configdict.defaults()['family'] }
#
# File we keep the history of uploaded images in.
#
HISTORY_FILE = configdict.defaults()['history_file']
NUM_THREADS = int(configdict.defaults()['num_threads'])
# Kodak cam EXIF tag keyword (key used in the dict returned by exif.process_file)
XPKEYWORDS = 'Image XPKeywords'
##
## You shouldn't need to modify anything below here
##
FLICKR["secret" ] = "13c314caee8b1f31"
FLICKR["api_key" ] = "91dfde3ed605f6b8b9d9c38886547dcf"
flickr.API_KEY = FLICKR["api_key" ]
flickr.API_SECRET =FLICKR["secret" ]
flickr.tokenFile= ".flickrToken"
flickr.AUTH = True
class APIConstants:
    # Flickr REST endpoint URLs plus the names of the standard request
    # parameters used when signing and building API calls.
    base = "http://flickr.com/services/"
    rest = base + "rest/"
    auth = base + "auth/"
    upload = base + "upload/"

    token = "auth_token"
    secret = "secret"
    key = "api_key"
    sig = "api_sig"
    frob = "frob"
    perms = "perms"
    method = "method"

    def __init__( self ):
        pass

# Shared instance used throughout this module.
api = APIConstants()
class Uploadr:
    # Drives the whole upload: Flickr auth (frob -> token dance), the
    # persistent upload-history shelf, and the upload worker threads.
    token = None
    perms = ""
    TOKEN_FILE = flickr.tokenFile

    def __init__( self ):
        # lock serializes history-shelf access across UploadThread workers
        self.lock = threading.Lock()
        self.token = self.getCachedToken()

    """
    Signs args via md5 per http://www.flickr.com/services/api/auth.spec.html (Section 8)
    """
    def signCall( self, data):
        # api_sig = md5(secret + "api_key" + key + k/v pairs sorted by key)
        keys = data.keys()
        keys.sort()
        foo = ""
        for a in keys:
            foo += (a + data[a])
        f = FLICKR[ api.secret ] + api.key + FLICKR[ api.key ] + foo
        #f = api.key + FLICKR[ api.key ] + foo
        return md5.new( f ).hexdigest()

    def urlGen( self , base,data, sig ):
        # Builds "<base>?k=v&...&api_key=<key>&api_sig=<sig>"
        foo = base + "?"
        for d in data:
            foo += d + "=" + data[d] + "&"
        return foo + api.key + "=" + FLICKR[ api.key ] + "&" + api.sig + "=" + sig

    #
    # Authenticate user so we can upload images
    #
    def authenticate( self ):
        #print "Getting new Token"
        self.getFrob()
        self.getAuthKey()
        self.getToken()
        self.cacheToken()

    """
    flickr.auth.getFrob
    Returns a frob to be used during authentication. This method call must be
    signed.
    This method does not require authentication.
    Arguments
    api.key (Required)
    Your API application key. See here for more details.
    """
    def getFrob( self ):
        d = {
            api.method : "flickr.auth.getFrob"
            }
        sig = self.signCall( d )
        url = self.urlGen( api.rest, d, sig )
        try:
            response = self.getResponse( url )
            if ( self.isGood( response ) ):
                FLICKR[ api.frob ] = str(response.frob)
            else:
                self.reportError( response )
        except:
            print "Error getting frob:" , str( sys.exc_info() )
            logging.error(sys.exc_info())

    """
    Checks to see if the user has authenticated this application
    """
    def getAuthKey( self ):
        d = {
            api.frob : FLICKR[ api.frob ],
            api.perms : "delete"
            }
        sig = self.signCall( d )
        url = self.urlGen( api.auth, d, sig )
        ans = ""
        try:
            # Open the user's browser on the Flickr grant page and wait for
            # manual confirmation before continuing the token dance.
            webbrowser.open( url )
            ans = raw_input("Have you authenticated this application? (Y/N): ")
        except:
            print str(sys.exc_info())
        if ( ans.lower() == "n" ):
            print "You need to allow this program to access your Flickr site."
            print "A web browser should pop open with instructions."
            print "After you have allowed access restart uploadr.py"
            sys.exit()

    """
    http://www.flickr.com/services/api/flickr.auth.getToken.html
    flickr.auth.getToken
    Returns the auth token for the given frob, if one has been attached. This method call must be signed.
    Authentication
    This method does not require authentication.
    Arguments
    NTC: We need to store the token in a file so we can get it and then check it insted of
    getting a new on all the time.
    api.key (Required)
    Your API application key. See here for more details.
    frob (Required)
    The frob to check.
    """
    def getToken( self ):
        d = {
            api.method : "flickr.auth.getToken",
            api.frob : str(FLICKR[ api.frob ])
            }
        sig = self.signCall( d )
        url = self.urlGen( api.rest, d, sig )
        try:
            res = self.getResponse( url )
            if ( self.isGood( res ) ):
                self.token = str(res.auth.token)
                self.perms = str(res.auth.perms)
                self.cacheToken()
            else :
                self.reportError( res )
        except:
            print str( sys.exc_info() )
            logging.error(sys.exc_info())

    """
    Attempts to get the flickr token from disk.
    """
    def getCachedToken( self ):
        if ( os.path.exists( self.TOKEN_FILE )):
            return open( self.TOKEN_FILE ).read()
        else :
            return None

    def cacheToken( self ):
        # Persist the token so subsequent runs can skip the auth dance.
        try:
            open( self.TOKEN_FILE , "w").write( str(self.token) )
        except:
            print "Issue writing token to local cache " , str(sys.exc_info())
            logging.error(sys.exc_info())

    """
    flickr.auth.checkToken
    Returns the credentials attached to an authentication token.
    Authentication
    This method does not require authentication.
    Arguments
    api.key (Required)
    Your API application key. See here for more details.
    auth_token (Required)
    The authentication token to check.
    """
    def checkToken( self ):
        if ( self.token == None ):
            return False
        else :
            d = {
                api.token : str(self.token) ,
                api.method : "flickr.auth.checkToken"
                }
            sig = self.signCall( d )
            url = self.urlGen( api.rest, d, sig )
            try:
                res = self.getResponse( url )
                if ( self.isGood( res ) ):
                    # token still valid; refresh cached credentials
                    self.token = res.auth.token
                    self.perms = res.auth.perms
                    return True
                else :
                    self.reportError( res )
            except:
                print str( sys.exc_info() )
                logging.error(sys.exc_info())
            return False

    def upload( self ):
        # Queue every new image and fan out NUM_THREADS UploadThread workers,
        # then wait for all of them to drain the queue.
        print HISTORY_FILE
        self.uploaded = shelve.open( HISTORY_FILE )
        newImages = self.grabNewImages()
        imageQueue = Queue.Queue()
        for image in newImages:
            imageQueue.put_nowait(image)
        threads = []
        for i in range( NUM_THREADS ):
            thread = UploadThread(i, self, imageQueue)
            threads.append(thread)
            thread.start()
        for thrd in threads:
            thrd.join()

    #get all images in folders and subfolders which match extensions below
    def grabNewImages( self ):
        images = []
        foo = os.walk( IMAGE_DIR )
        for data in foo:
            (dirpath, dirnames, filenames) = data
            for f in filenames :
                ext = f.lower().split(".")[-1]
                if ext in ("jpg", "jpeg", "gif", "png", "avi", "mov", "mp4"):
                    images.append( os.path.normpath( dirpath + "/" + f ) )
        images.sort()
        return images

    def has_key(self, folderTag):
        # Thread-safe membership test on the shared history shelf.
        with self.lock:
            return self.uploaded.has_key(folderTag)

    def logUpload( self, photoID, imageName ):
        # Record both directions of the mapping (path -> id and id -> path);
        # the shelf is closed and reopened to flush the entry to disk.
        photoID = str( photoID )
        imageName = str( imageName )
        with self.lock:
            self.uploaded[ imageName ] = photoID
            self.uploaded[ photoID ] = imageName
            self.uploaded.close()
            self.uploaded = shelve.open( HISTORY_FILE )

    #
    #
    # build_request/encode_multipart_formdata code is from www.voidspace.org.uk/atlantibots/pythonutils.html
    #
    #
    def build_request(self, theurl, fields, files, txheaders=None):
        """
        Given the fields to set and the files to encode it returns a fully formed urllib2.Request object.
        You can optionally pass in additional headers to encode into the opject. (Content-type and Content-length will be overridden if they are set).
        fields is a sequence of (name, value) elements for regular form fields - or a dictionary.
        files is a sequence of (name, filename, value) elements for data to be uploaded as files.
        """
        content_type, body = self.encode_multipart_formdata(fields, files)
        if not txheaders: txheaders = {}
        txheaders['Content-type'] = content_type
        txheaders['Content-length'] = str(len(body))
        return urllib2.Request(theurl, body, txheaders)

    def encode_multipart_formdata(self,fields, files, BOUNDARY = '-----'+mimetools.choose_boundary()+'-----'):
        """ Encodes fields and files for uploading.
        fields is a sequence of (name, value) elements for regular form fields - or a dictionary.
        files is a sequence of (name, filename, value) elements for data to be uploaded as files.
        Return (content_type, body) ready for urllib2.Request instance
        You can optionally pass in a boundary string to use or we'll let mimetools provide one.
        """
        CRLF = '\r\n'
        L = []
        if isinstance(fields, dict):
            fields = fields.items()
        for (key, value) in fields:
            L.append('--' + BOUNDARY)
            L.append('Content-Disposition: form-data; name="%s"' % key)
            L.append('')
            L.append(value)
        for (key, filename, value) in files:
            filetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            L.append('--' + BOUNDARY)
            L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
            L.append('Content-Type: %s' % filetype)
            L.append('')
            L.append(value)
        L.append('--' + BOUNDARY + '--')
        L.append('')
        body = CRLF.join(L)
        content_type = 'multipart/form-data; boundary=%s' % BOUNDARY # XXX what if no files are encoded
        return content_type, body

    def isGood( self, res ):
        # A Flickr REST response is good when its 'stat' attribute is "ok".
        if ( not res == "" and res('stat') == "ok" ):
            return True
        else :
            return False

    def reportError( self, res ):
        logging.error(res)
        try:
            print "Error:", str( res.err('code') + " " + res.err('msg') )
        except:
            print "Error: " + str( res )

    """
    Send the url and get a response. Let errors float up
    """
    def getResponse( self, url ):
        xml = urllib2.urlopen( url ).read()
        return xmltramp.parse( xml )
class UploadThread (threading.Thread):
    # Worker that drains the shared image queue and uploads each file.
    def __init__(self, threadID, upl, imageQueue):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.upl = upl          # owning Uploadr instance (auth + history shelf)
        self.imageQueue = imageQueue

    def uploadImage( self, image ):
        # Skip images already present in the history shelf.  Tag scheme:
        # one unique "#<path-with-#-for-spaces>" tag (used by flickr2history
        # to match photos back to files), plus human-readable tags made from
        # the path components, plus any XPKeywords found in the EXIF header.
        folderTag = image[len(IMAGE_DIR):]
        if ( not self.upl.has_key( folderTag ) ):
            try:
                logging.debug( "UploadThread %d Getting EXIF for %s" % (self.threadID, image))
                f = open(image, 'rb')
                exiftags = exif.process_file(f)
                f.close()
                #print exiftags[XPKEYWORDS]
                #print folderTag
                #make one tag equal to original file path with spaces replaced by # and start it with # (for easier recognition) since space is used as TAG separator by flickr
                # this is needed for later syncing flickr with folders
                realTags = folderTag.replace('\\',' ') # look for / or \ or _ or . and replace them with SPACE to make real Tags
                realTags = realTags.replace('/',' ')   # these will be the real tags ripped from folders
                realTags = realTags.replace('_',' ')
                realTags = realTags.replace('.',' ')
                picTags = '#' + folderTag.replace(' ','#') + ' ' + realTags
                if exiftags == {}:
                    logging.debug( 'UploadThread %d NO_EXIF_HEADER for %s' % (self.threadID, image))
                else:
                    if XPKEYWORDS in exiftags: #look for additional tags in EXIF to tag picture with
                        if len(exiftags[XPKEYWORDS].printable) > 4:
                            # NOTE(review): eval() of data read from the image
                            # file is unsafe on untrusted files -- consider
                            # ast.literal_eval instead.
                            picTags += exif.make_string( eval(exiftags[XPKEYWORDS].printable)).replace(';',' ')
                #print picTags
                logging.debug( "UploadThread %d Uploading image %s" % (self.threadID, image))
                photo = ('photo', image, open(image,'rb').read())
                d = {
                    api.token : str(self.upl.token),
                    api.perms : str(self.upl.perms),
                    "tags" : str(picTags),
                    "is_public" : str( FLICKR["is_public"] ),
                    "is_friend" : str( FLICKR["is_friend"] ),
                    "is_family" : str( FLICKR["is_family"] )
                    }
                sig = self.upl.signCall( d )
                d[ api.sig ] = sig
                d[ api.key ] = FLICKR[ api.key ]
                url = self.upl.build_request(api.upload, d, (photo,))
                xml = urllib2.urlopen( url ).read()
                res = xmltramp.parse(xml)
                if ( self.upl.isGood( res ) ):
                    logging.debug( "successful.")
                    # record path<->photoid in the shared history shelf
                    self.upl.logUpload( res.photoid, folderTag )
                else :
                    print "problem.."
                    self.upl.reportError( res )
            except:
                logging.error(sys.exc_info())

    def run(self):
        # Pull images until the shared queue is empty, then exit.
        logging.debug("Starting UploadThread %d " % self.threadID)
        while True:
            try:
                image = self.imageQueue.get_nowait()
                logging.debug("UploadThread %d qSize: %d processing %s" % (self.threadID, self.imageQueue.qsize(), image))
                self.uploadImage(image)
            except Queue.Empty:
                break
        logging.debug("Exiting UploadThread %d " % self.threadID)
if __name__ == "__main__":
    # All DEBUG output goes to debug.log; ERROR and above is duplicated
    # into error.log (NOTE(review): `console` is actually a FileHandler,
    # not a console handler, despite the name).
    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(levelname)s %(message)s',
                        filename='debug.log',
                        filemode='w')
    logging.debug('Started')
    console = logging.FileHandler('error.log')
    console.setLevel(logging.ERROR)
    logging.getLogger('').addHandler(console)
    # NOTE(review): this rebinds the name of the imported `flickr` module to
    # an Uploadr instance for the rest of the script.
    flickr = Uploadr()
    if ( not flickr.checkToken() ):
        flickr.authenticate()
    #see if we need to wipe flickr account first
    if(configdict.defaults()['remove_all_pics_first'].startswith('true')):
        deleteAll.deleteAllPics()
        os._exit(1) ## STOP HERE after deleting all media so user has chance to turn off switch before next start
    images = flickr.grabNewImages()
    #this is just double checking if everything is on Flickr what is in the history file
    # in another words it will restore history file if deleted by comparing flickr with folders
    flickr2history.reshelf(images, IMAGE_DIR, HISTORY_FILE, NUM_THREADS)
    #uploads all images that are in folders and not in history file
    flickr.upload() #uploads all new images to flickr
    #this will organize uploaded files into sets with the names according to tags
    tags2set.createSets( HISTORY_FILE)
| Python |
#!/usr/bin/python
import dbhash,anydbm
import sys, os, shelve, logging,string
from ConfigParser import *
import flickr
# Module state: both are filled in by createSets() after a successful login.
existingSets = None
user = None
configdict = ConfigParser()
configdict.read('uploadr.ini')
onlySubs = configdict.defaults()['only_sub_sets'] #set to true if Sets should be called only by the name of the last subfolder
def creatSet(photoSet, setName):
    """Create (or reuse) the Flickr photoset `setName` and fill it with `photoSet`.

    photoSet -- list of Flickr photo id strings
    setName  -- raw set name; back/forward slashes become spaces, then trimmed

    Returns the flickr.Photoset object, or None if nothing could be done.
    Relies on the module-level `existingSets` populated by createSets().
    """
    setName = setName.replace('\\', ' ')
    setName = setName.replace('/', ' ')
    setName = setName.strip()  # str method instead of deprecated string.strip()
    # Guard against an empty photo list -- the original fell through to an
    # IndexError on photos[0] that was silently swallowed below.
    if not photoSet:
        logging.debug('tags2set: nothing to add to set %s' % setName)
        return None
    photos = []  # real photo objects
    for p in photoSet:
        photos.append(flickr.Photo(id=p))
    fset = None
    # check if a set with this name exists already
    for s in existingSets:
        if s.title == setName:
            fset = s
            logging.debug('tags2set: Found existing set %s' % setName)
            break
    try:
        if fset is None:
            # Flickr creates a set from its first photo; the remaining ones
            # are attached below via editPhotos().
            fset = flickr.Photoset.create(photos[0], setName, 'auto generated by folders2flickr')
            logging.debug('tags2set: Creating new set %s' % setName)
    except Exception:
        logging.error('tags2set: Cannot create set %s' % setName)
        logging.error(sys.exc_info()[0])
    try:
        fset.editPhotos(photos)
    except Exception:
        logging.error('tags2set: Cannot edit set %s' % setName)
        logging.error(sys.exc_info()[0])
    logging.debug('tags2set: ...added %d photos' % len(photos))
    return fset
def createSets( historyFile):
    """Group uploaded photos into Flickr sets named after their folders.

    Walks the upload-history shelf (which maps image-path -> photo-id and the
    reverse), and for every run of images sharing a directory creates/updates
    one photoset via creatSet().  Set names are either the full directory or
    just the last subfolder, depending on the 'only_sub_sets' ini option.
    """
    global existingSets
    global user
    logging.debug('tags2set: Started tags2set')
    try:
        user = flickr.test_login()
        logging.debug(user.id)
        existingSets = user.getPhotosets()
    except Exception:
        logging.error(sys.exc_info()[0])
        return None
    uploaded = shelve.open( historyFile )
    keys = uploaded.keys()
    keys.sort()  # sorting groups the images of one directory together
    lastSetName = ''
    photoSet = []
    setName = ''
    for image in keys:
        # the shelf also holds photoid->path entries; real path keys start
        # with a path separator
        if image.startswith('\\') or image.startswith('/'):
            if onlySubs.startswith('true'):
                head, setName = os.path.split(os.path.dirname(image))
            else:
                setName = os.path.dirname(image)  # set name is really a directory
            if lastSetName != setName and lastSetName != '':
                # a new set is starting, so flush the previous one
                creatSet(photoSet, lastSetName)
                photoSet = []
            logging.debug("tags2set: Adding image %s" % image)
            photoSet.append(uploaded.get(image))
            lastSetName = setName
    uploaded.close()  # BUG FIX: the shelf was never closed
    # don't forget to create the last set -- but only if anything was
    # collected (BUG FIX: the original called creatSet unconditionally,
    # even with an empty list and an empty name)
    if photoSet:
        creatSet(photoSet, setName)
| Python |
#!/usr/bin/python
__author__ = "pkolarov@gmail.com"
import dbhash,anydbm
import sys, os, shelve, logging,string
import threading, Queue
import flickr
# Module state shared between reshelf() and its worker threads.
user = None      # flickr user object, set after login in reshelf()
uploaded = None  # shelve history file, reopened after every write
lock = None      # guards the shared shelf across ReshelfThread workers
def getPhotoIDbyTag(tag):
    """Return the single Flickr photo carrying `tag`, or None.

    Works only because the uploader tags every picture with a unique
    auto-generated path tag.  The search is retried up to 3 times on Flickr
    API errors.  If several photos match, the duplicates are DELETED from
    Flickr and the one remaining photo is returned.  Relies on the
    module-level `user` being logged in already.
    """
    retries = 0
    photos = None
    while retries < 3:
        try:
            logging.debug(user.id)
            photos = flickr.photos_search(user_id=user.id, auth=all, tags=tag, tag_mode='any')
            break
        except Exception:
            logging.error("flickr2history: Flickr error while searching ....retrying")
            logging.error(sys.exc_info()[0])
            retries = retries + 1
    if not photos:
        logging.debug("flickr2history: No image in Flickr (yet) with tags %s (possibly deleted in Flickr by user)" % tag)
        return None
    logging.debug("flickr2history: Tag=%s found %d" % (tag, len(photos)))
    # More than one hit means duplicate uploads: keep one, delete the rest.
    while len(photos) > 1:
        logging.debug("flickr2history :Tag %s matches %d images!" % (tag, len(photos)))
        logging.debug("flickr2history: Removing other images")
        try:
            photos.pop().delete()
        except Exception:
            logging.error("flickr2history: Flickr error while deleting duplicate image")
            logging.error(sys.exc_info()[0])
    return photos[0]
class ReshelfThread (threading.Thread):
    # Worker that re-registers already-uploaded images in the history shelf
    # by finding them on Flickr via their unique auto-generated path tag.
    def __init__(self, threadID, imageDir, imageQueue, historyFile):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.imageDir = imageDir
        self.imageQueue = imageQueue
        self.historyFile = historyFile

    def has_key(self, image):
        # Thread-safe membership test on the shared shelf.
        global lock
        global uploaded
        with lock:
            return uploaded.has_key(str(image))

    def update(self, image, photo):
        # Thread-safe write of both directions of the mapping; the shelf is
        # closed and reopened so the entry is flushed to disk immediately.
        global lock
        global uploaded
        with lock:
            uploaded[ str(image)] = str(photo.id)
            uploaded[ str(photo.id) ] =str(image)
            uploaded.close();
            uploaded = shelve.open(self.historyFile ) #its better to always reopen this file

    def run(self):
        # Drain the shared queue; for each image missing from the history
        # file, look it up on Flickr by tag and restore the mapping.
        logging.debug( "Starting ReshelfThread %d " % self.threadID )
        while True:
            try:
                image = self.imageQueue.get_nowait()
                logging.debug( "ReshelfThread %d qSize: %d processing %s" % (self.threadID, self.imageQueue.qsize(), image) )
                image = image[len(self.imageDir):] #remove absolute directory
                if ( not self.has_key(str(image) ) ):
                    #each picture should have one id tag in the folder format with spaces replaced by # and starting with #
                    flickrtag = '#' + image.replace(' ','#')
                    logging.debug(flickrtag)
                    photo = getPhotoIDbyTag(flickrtag)
                    logging.debug(image)
                    if not photo:
                        #uploaded.close() # flush the DB file
                        continue
                    logging.debug("ReshelfThread: Reregistering %s photo in local history file" % image)
                    self.update(image, photo)
            except Queue.Empty:
                break
        logging.debug( "Exiting ReshelfThread %d " % self.threadID )
def reshelf(images, imageDir, historyFile, numThreads):
    """Rebuild missing history-file entries by matching images to Flickr tags.

    Stores an image reference in the history file if it is not there yet and
    we can actually find the photo on Flickr: worker threads look up each
    image by its auto-generated path tag and re-register the mapping --
    effectively restoring a deleted/corrupted history file.
    """
    logging.debug('flickr2history: Started flickr2history')
    try:
        global user
        user = flickr.test_login()
        logging.debug(user.id)
    except Exception:
        logging.error(sys.exc_info()[0])
        return None
    imageQueue = Queue.Queue()
    for image in images:
        imageQueue.put_nowait(image)
    global uploaded
    uploaded = shelve.open( historyFile )  # its better to always reopen this file
    global lock
    lock = threading.Lock()
    threads = []
    for i in range(numThreads):
        thread = ReshelfThread(i, imageDir, imageQueue, historyFile)
        threads.append(thread)
        thread.start()
    for thrd in threads:
        thrd.join()
    uploaded.close()
    logging.debug('flickr2history: Finished flickr2history')
| Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Library to extract EXIF information from digital camera image files
# http://sourceforge.net/projects/exif-py/
#
# VERSION 1.1.0
#
# To use this library call with:
# f = open(path_name, 'rb')
# tags = EXIF.process_file(f)
#
# To ignore MakerNote tags, pass the -q or --quick
# command line arguments, or as
# tags = EXIF.process_file(f, details=False)
#
# To stop processing after a certain tag is retrieved,
# pass the -t TAG or --stop-tag TAG argument, or as
# tags = EXIF.process_file(f, stop_tags=('TAG1','TAG2'))
#
# where TAG is a valid tag name, ex 'DateTimeOriginal'
#
# These 2 are useful when you are retrieving a large list of images
#
#
# To return an error on invalid tags,
# pass the -s or --strict argument, or as
# tags = EXIF.process_file(f, strict=True)
#
# Otherwise these tags will be ignored
#
# Returned tags will be a dictionary mapping names of EXIF tags to their
# values in the file named by path_name. You can process the tags
# as you wish. In particular, you can iterate through all the tags with:
# for tag in tags.keys():
# if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename',
# 'EXIF MakerNote'):
# print "Key: %s, value %s" % (tag, tags[tag])
# (This code uses the if statement to avoid printing out a few of the
# tags that tend to be long or boring.)
#
# The tags dictionary will include keys for all of the usual EXIF
# tags, and will also include keys for Makernotes used by some
# cameras, for which we have a good specification.
#
# Note that the dictionary keys are the IFD name followed by the
# tag name. For example:
# 'EXIF DateTimeOriginal', 'Image Orientation', 'MakerNote FocusMode'
#
# Copyright (c) 2002-2007 Gene Cash All rights reserved
# Copyright (c) 2007-2008 Ianaré Sévi All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of the authors nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# ----- See 'changes.txt' file for all contributors and changes ----- #
#
# Don't throw an exception when given an out of range character.
def make_string(seq):
    """Convert a sequence of character codes into a printable string.

    Codes outside the printable range 32..255 are screened out.  If nothing
    printable remains, the original sequence is returned unchanged so the
    caller still has the raw value.
    """
    result = ''  # renamed from `str`, which shadowed the builtin
    for c in seq:
        # Screen out non-printing characters
        if 32 <= c < 256:
            result += chr(c)
    # If no printing chars
    if not result:
        return seq
    return result
# Special version to deal with the code in the first 8 bytes of a user comment.
# First 8 bytes gives coding system e.g. ASCII vs. JIS vs Unicode
def make_string_uc(seq):
    """Strip the 8-byte coding-system prefix and stringify the remainder."""
    code = seq[0:8]
    payload = seq[8:]
    # Of course, this is only correct if ASCII, and the standard explicitly
    # allows JIS and Unicode.
    return make_string(payload)
# field type descriptions as (length, abbreviation, full name) tuples,
# indexed by the TIFF/EXIF field-type code (entry 0 is a placeholder,
# since real type codes start at 1)
FIELD_TYPES = (
    (0, 'X', 'Proprietary'), # no such type
    (1, 'B', 'Byte'),
    (1, 'A', 'ASCII'),
    (2, 'S', 'Short'),
    (4, 'L', 'Long'),
    (8, 'R', 'Ratio'),
    (1, 'SB', 'Signed Byte'),
    (1, 'U', 'Undefined'),
    (2, 'SS', 'Signed Short'),
    (4, 'SL', 'Signed Long'),
    (8, 'SR', 'Signed Ratio'),
    )
# dictionary of main EXIF tag names, keyed by numeric tag id.
# first element of tuple is tag name, optional second element is
# another dictionary giving names to values, or a callable that
# renders the raw value (e.g. make_string).
EXIF_TAGS = {
    0x0100: ('ImageWidth', ),
    0x0101: ('ImageLength', ),
    0x0102: ('BitsPerSample', ),
    0x0103: ('Compression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed'}),
    0x0106: ('PhotometricInterpretation', ),
    0x0107: ('Thresholding', ),
    0x010A: ('FillOrder', ),
    0x010D: ('DocumentName', ),
    0x010E: ('ImageDescription', ),
    0x010F: ('Make', ),
    0x0110: ('Model', ),
    0x0111: ('StripOffsets', ),
    0x0112: ('Orientation',
             {1: 'Horizontal (normal)',
              2: 'Mirrored horizontal',
              3: 'Rotated 180',
              4: 'Mirrored vertical',
              5: 'Mirrored horizontal then rotated 90 CCW',
              6: 'Rotated 90 CW',
              7: 'Mirrored horizontal then rotated 90 CW',
              8: 'Rotated 90 CCW'}),
    0x0115: ('SamplesPerPixel', ),
    0x0116: ('RowsPerStrip', ),
    0x0117: ('StripByteCounts', ),
    0x011A: ('XResolution', ),
    0x011B: ('YResolution', ),
    0x011C: ('PlanarConfiguration', ),
    0x011D: ('PageName', make_string),
    0x0128: ('ResolutionUnit',
             {1: 'Not Absolute',
              2: 'Pixels/Inch',
              3: 'Pixels/Centimeter'}),
    0x012D: ('TransferFunction', ),
    0x0131: ('Software', ),
    0x0132: ('DateTime', ),
    0x013B: ('Artist', ),
    0x013E: ('WhitePoint', ),
    0x013F: ('PrimaryChromaticities', ),
    0x0156: ('TransferRange', ),
    0x0200: ('JPEGProc', ),
    0x0201: ('JPEGInterchangeFormat', ),
    0x0202: ('JPEGInterchangeFormatLength', ),
    0x0211: ('YCbCrCoefficients', ),
    0x0212: ('YCbCrSubSampling', ),
    0x0213: ('YCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0214: ('ReferenceBlackWhite', ),
    0x4746: ('Rating', ),
    0x828D: ('CFARepeatPatternDim', ),
    0x828E: ('CFAPattern', ),
    0x828F: ('BatteryLevel', ),
    0x8298: ('Copyright', ),
    0x829A: ('ExposureTime', ),
    0x829D: ('FNumber', ),
    0x83BB: ('IPTC/NAA', ),
    0x8769: ('ExifOffset', ),
    0x8773: ('InterColorProfile', ),
    0x8822: ('ExposureProgram',
             {0: 'Unidentified',
              1: 'Manual',
              2: 'Program Normal',
              3: 'Aperture Priority',
              4: 'Shutter Priority',
              5: 'Program Creative',
              6: 'Program Action',
              7: 'Portrait Mode',
              8: 'Landscape Mode'}),
    0x8824: ('SpectralSensitivity', ),
    0x8825: ('GPSInfo', ),
    0x8827: ('ISOSpeedRatings', ),
    0x8828: ('OECF', ),
    0x9000: ('ExifVersion', make_string),
    0x9003: ('DateTimeOriginal', ),
    0x9004: ('DateTimeDigitized', ),
    0x9101: ('ComponentsConfiguration',
             {0: '',
              1: 'Y',
              2: 'Cb',
              3: 'Cr',
              4: 'Red',
              5: 'Green',
              6: 'Blue'}),
    0x9102: ('CompressedBitsPerPixel', ),
    0x9201: ('ShutterSpeedValue', ),
    0x9202: ('ApertureValue', ),
    0x9203: ('BrightnessValue', ),
    0x9204: ('ExposureBiasValue', ),
    0x9205: ('MaxApertureValue', ),
    0x9206: ('SubjectDistance', ),
    0x9207: ('MeteringMode',
             {0: 'Unidentified',
              1: 'Average',
              2: 'CenterWeightedAverage',
              3: 'Spot',
              4: 'MultiSpot',
              5: 'Pattern'}),
    0x9208: ('LightSource',
             {0: 'Unknown',
              1: 'Daylight',
              2: 'Fluorescent',
              3: 'Tungsten',
              9: 'Fine Weather',
              10: 'Flash',
              11: 'Shade',
              12: 'Daylight Fluorescent',
              13: 'Day White Fluorescent',
              14: 'Cool White Fluorescent',
              15: 'White Fluorescent',
              17: 'Standard Light A',
              18: 'Standard Light B',
              19: 'Standard Light C',
              20: 'D55',
              21: 'D65',
              22: 'D75',
              255: 'Other'}),
    0x9209: ('Flash',
             {0: 'No',
              1: 'Fired',
              5: 'Fired (?)', # no return sensed
              7: 'Fired (!)', # return sensed
              9: 'Fill Fired',
              13: 'Fill Fired (?)',
              15: 'Fill Fired (!)',
              16: 'Off',
              24: 'Auto Off',
              25: 'Auto Fired',
              29: 'Auto Fired (?)',
              31: 'Auto Fired (!)',
              32: 'Not Available'}),
    0x920A: ('FocalLength', ),
    0x9214: ('SubjectArea', ),
    0x927C: ('MakerNote', ),
    0x9286: ('UserComment', make_string_uc),
    0x9290: ('SubSecTime', ),
    0x9291: ('SubSecTimeOriginal', ),
    0x9292: ('SubSecTimeDigitized', ),
    # used by Windows Explorer
    0x9C9B: ('XPTitle', ),
    0x9C9C: ('XPComment', ),
    0x9C9D: ('XPAuthor', ), #(ignored by Windows Explorer if Artist exists)
    0x9C9E: ('XPKeywords', ),
    0x9C9F: ('XPSubject', ),
    0xA000: ('FlashPixVersion', make_string),
    0xA001: ('ColorSpace',
             {1: 'sRGB',
              2: 'Adobe RGB',
              65535: 'Uncalibrated'}),
    0xA002: ('ExifImageWidth', ),
    0xA003: ('ExifImageLength', ),
    0xA005: ('InteroperabilityOffset', ),
    0xA20B: ('FlashEnergy', ),               # 0x920B in TIFF/EP
    0xA20C: ('SpatialFrequencyResponse', ),  # 0x920C
    0xA20E: ('FocalPlaneXResolution', ),     # 0x920E
    0xA20F: ('FocalPlaneYResolution', ),     # 0x920F
    0xA210: ('FocalPlaneResolutionUnit', ),  # 0x9210
    0xA214: ('SubjectLocation', ),           # 0x9214
    0xA215: ('ExposureIndex', ),             # 0x9215
    0xA217: ('SensingMethod',                # 0x9217
             {1: 'Not defined',
              2: 'One-chip color area',
              3: 'Two-chip color area',
              4: 'Three-chip color area',
              5: 'Color sequential area',
              7: 'Trilinear',
              8: 'Color sequential linear'}),
    0xA300: ('FileSource',
             {1: 'Film Scanner',
              2: 'Reflection Print Scanner',
              3: 'Digital Camera'}),
    0xA301: ('SceneType',
             {1: 'Directly Photographed'}),
    0xA302: ('CVAPattern', ),
    0xA401: ('CustomRendered',
             {0: 'Normal',
              1: 'Custom'}),
    0xA402: ('ExposureMode',
             {0: 'Auto Exposure',
              1: 'Manual Exposure',
              2: 'Auto Bracket'}),
    0xA403: ('WhiteBalance',
             {0: 'Auto',
              1: 'Manual'}),
    0xA404: ('DigitalZoomRatio', ),
    0xA405: ('FocalLengthIn35mmFilm', ),
    0xA406: ('SceneCaptureType',
             {0: 'Standard',
              1: 'Landscape',
              2: 'Portrait',
              3: 'Night'}),  # BUG FIX: was 'Night)' with a stray paren
    0xA407: ('GainControl',
             {0: 'None',
              1: 'Low gain up',
              2: 'High gain up',
              3: 'Low gain down',
              4: 'High gain down'}),
    0xA408: ('Contrast',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA409: ('Saturation',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40A: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0xA40B: ('DeviceSettingDescription', ),
    0xA40C: ('SubjectDistanceRange', ),
    0xA500: ('Gamma', ),
    0xC4A5: ('PrintIM', ),
    0xEA1C: ('Padding', ),
    }
# interoperability tags (tags found in the Interoperability IFD)
INTR_TAGS = {
    0x0001: ('InteroperabilityIndex', ),
    0x0002: ('InteroperabilityVersion', ),
    0x1000: ('RelatedImageFileFormat', ),
    0x1001: ('RelatedImageWidth', ),
    0x1002: ('RelatedImageLength', ),
    }
# GPS tags (not used yet, haven't seen camera with GPS)
GPS_TAGS = {
    0x0000: ('GPSVersionID', ),
    0x0001: ('GPSLatitudeRef', ),
    0x0002: ('GPSLatitude', ),
    0x0003: ('GPSLongitudeRef', ),
    0x0004: ('GPSLongitude', ),
    0x0005: ('GPSAltitudeRef', ),
    0x0006: ('GPSAltitude', ),
    0x0007: ('GPSTimeStamp', ),
    0x0008: ('GPSSatellites', ),
    0x0009: ('GPSStatus', ),
    0x000A: ('GPSMeasureMode', ),
    0x000B: ('GPSDOP', ),
    0x000C: ('GPSSpeedRef', ),
    0x000D: ('GPSSpeed', ),
    0x000E: ('GPSTrackRef', ),
    0x000F: ('GPSTrack', ),
    0x0010: ('GPSImgDirectionRef', ),
    0x0011: ('GPSImgDirection', ),
    0x0012: ('GPSMapDatum', ),
    0x0013: ('GPSDestLatitudeRef', ),
    0x0014: ('GPSDestLatitude', ),
    0x0015: ('GPSDestLongitudeRef', ),
    0x0016: ('GPSDestLongitude', ),
    0x0017: ('GPSDestBearingRef', ),
    0x0018: ('GPSDestBearing', ),
    0x0019: ('GPSDestDistanceRef', ),
    0x001A: ('GPSDestDistance', ),
    0x001D: ('GPSDate', ),
    }
# Ignore these tags when quick processing
# 0x927C is MakerNote Tags
# 0x9286 is user comment
IGNORE_TAGS=(0x9286, 0x927C)
# http://tomtia.plala.jp/DigitalCamera/MakerNote/index.asp
def nikon_ev_bias(seq):
    """Decode a Nikon EV-bias MakerNote value into a printable string.

    seq -- the raw value list from the MakerNote.  The first element is a
    signed step count stored as an unsigned byte; the third element is the
    number of steps per EV (usually 6, but 12 for ExposureDifference).

    Returns strings such as "-2/3 EV", "0 EV" or "+1 1/3 EV", or "" if the
    sequence is too short (which only happens if something has gone really
    wrong while reading the Nikon MakerNote).
    """
    # Error condition that could otherwise cause a crash.
    if len(seq) < 4:
        return ""
    # Common values, decoded from the known table.
    known = {
        (252, 1, 6, 0): "-2/3 EV",
        (253, 1, 6, 0): "-1/2 EV",
        (254, 1, 6, 0): "-1/3 EV",
        (0, 1, 6, 0): "0 EV",
        (2, 1, 6, 0): "+1/3 EV",
        (3, 1, 6, 0): "+1/2 EV",
        (4, 1, 6, 0): "+2/3 EV",
    }
    key = tuple(seq)
    if key in known:
        return known[key]
    # Handle combinations not in the table.
    a = seq[0]
    # Zero causes headaches for the +/- logic, so special case it.
    if a == 0:
        return "0 EV"
    if a > 127:
        # Negative bias is stored as an unsigned byte (256 - steps).
        a = 256 - a
        ret_str = "-"
    else:
        ret_str = "+"
    b = seq[2]  # Assume the third value means the step size.
    # Floor division keeps whole/remainder integral even if true-division
    # semantics are in effect (Py2 '/' on ints happened to floor anyway).
    whole = a // b
    a = a % b
    if whole != 0:
        ret_str = ret_str + str(whole) + " "
    if a == 0:
        ret_str = ret_str + "EV"
    else:
        # Fractional remainder, reduced for printing by Ratio.
        r = Ratio(a, b)
        ret_str = ret_str + repr(r) + " EV"
    return ret_str
# Nikon E99x MakerNote Tags
# Each entry maps tag id -> (tag name[, decoder]); the optional decoder
# is either a raw-value -> string lookup dict or a callable applied to
# the raw value list.  Transcribed from ExifTool's Nikon documentation.
MAKERNOTE_NIKON_NEWER_TAGS={
    0x0001: ('MakernoteVersion', make_string),  # Sometimes binary
    0x0002: ('ISOSetting', make_string),
    0x0003: ('ColorMode', ),
    0x0004: ('Quality', ),
    0x0005: ('Whitebalance', ),
    0x0006: ('ImageSharpening', ),
    0x0007: ('FocusMode', ),
    0x0008: ('FlashSetting', ),
    0x0009: ('AutoFlashMode', ),
    0x000B: ('WhiteBalanceBias', ),
    0x000C: ('WhiteBalanceRBCoeff', ),
    0x000D: ('ProgramShift', nikon_ev_bias),
    # Nearly the same as the other EV vals, but step size is 1/12 EV (?)
    0x000E: ('ExposureDifference', nikon_ev_bias),
    0x000F: ('ISOSelection', ),
    0x0011: ('NikonPreview', ),
    0x0012: ('FlashCompensation', nikon_ev_bias),
    0x0013: ('ISOSpeedRequested', ),
    0x0016: ('PhotoCornerCoordinates', ),
    # 0x0017: Unknown, but most likely an EV value
    0x0018: ('FlashBracketCompensationApplied', nikon_ev_bias),
    0x0019: ('AEBracketCompensationApplied', ),
    0x001A: ('ImageProcessing', ),
    0x001B: ('CropHiSpeed', ),
    0x001D: ('SerialNumber', ),	# Conflict with 0x00A0 ?
    0x001E: ('ColorSpace', ),
    0x001F: ('VRInfo', ),
    0x0020: ('ImageAuthentication', ),
    0x0022: ('ActiveDLighting', ),
    0x0023: ('PictureControl', ),
    0x0024: ('WorldTime', ),
    0x0025: ('ISOInfo', ),
    0x0080: ('ImageAdjustment', ),
    0x0081: ('ToneCompensation', ),
    0x0082: ('AuxiliaryLens', ),
    0x0083: ('LensType', ),
    0x0084: ('LensMinMaxFocalMaxAperture', ),
    0x0085: ('ManualFocusDistance', ),
    0x0086: ('DigitalZoomFactor', ),
    0x0087: ('FlashMode',
             {0x00: 'Did Not Fire',
              0x01: 'Fired, Manual',
              0x07: 'Fired, External',
              0x08: 'Fired, Commander Mode ',
              0x09: 'Fired, TTL Mode'}),
    0x0088: ('AFFocusPosition',
             {0x0000: 'Center',
              0x0100: 'Top',
              0x0200: 'Bottom',
              0x0300: 'Left',
              0x0400: 'Right'}),
    0x0089: ('BracketingMode',
             {0x00: 'Single frame, no bracketing',
              0x01: 'Continuous, no bracketing',
              0x02: 'Timer, no bracketing',
              0x10: 'Single frame, exposure bracketing',
              0x11: 'Continuous, exposure bracketing',
              0x12: 'Timer, exposure bracketing',
              0x40: 'Single frame, white balance bracketing',
              0x41: 'Continuous, white balance bracketing',
              0x42: 'Timer, white balance bracketing'}),
    0x008A: ('AutoBracketRelease', ),
    0x008B: ('LensFStops', ),
    0x008C: ('NEFCurve1', ),	# ExifTool calls this 'ContrastCurve'
    0x008D: ('ColorMode', ),
    0x008F: ('SceneMode', ),
    0x0090: ('LightingType', ),
    0x0091: ('ShotInfo', ),	# First 4 bytes are a version number in ASCII
    0x0092: ('HueAdjustment', ),
    # ExifTool calls this 'NEFCompression', should be 1-4
    0x0093: ('Compression', ),
    0x0094: ('Saturation',
             {-3: 'B&W',
              -2: '-2',
              -1: '-1',
              0: '0',
              1: '1',
              2: '2'}),
    0x0095: ('NoiseReduction', ),
    0x0096: ('NEFCurve2', ),	# ExifTool calls this 'LinearizationTable'
    0x0097: ('ColorBalance', ),	# First 4 bytes are a version number in ASCII
    0x0098: ('LensData', ),	# First 4 bytes are a version number in ASCII
    0x0099: ('RawImageCenter', ),
    0x009A: ('SensorPixelSize', ),
    0x009C: ('Scene Assist', ),
    0x009E: ('RetouchHistory', ),
    0x00A0: ('SerialNumber', ),
    0x00A2: ('ImageDataSize', ),
    # 00A3: unknown - a single byte 0
    # 00A4: In NEF, looks like a 4 byte ASCII version number ('0200')
    0x00A5: ('ImageCount', ),
    0x00A6: ('DeletedImageCount', ),
    0x00A7: ('TotalShutterReleases', ),
    # First 4 bytes are a version number in ASCII, with version specific
    # info to follow.  It's hard to treat it as a string due to embedded nulls.
    0x00A8: ('FlashInfo', ),
    0x00A9: ('ImageOptimization', ),
    0x00AA: ('Saturation', ),
    0x00AB: ('DigitalVariProgram', ),
    0x00AC: ('ImageStabilization', ),
    0x00AD: ('Responsive AF', ),	# 'AFResponse'
    0x00B0: ('MultiExposure', ),
    0x00B1: ('HighISONoiseReduction', ),
    0x00B7: ('AFInfo', ),
    0x00B8: ('FileInfo', ),
    # 00B9: unknown
    0x0100: ('DigitalICE', ),
    0x0103: ('PreviewCompression',
             {1: 'Uncompressed',
              2: 'CCITT 1D',
              3: 'T4/Group 3 Fax',
              4: 'T6/Group 4 Fax',
              5: 'LZW',
              6: 'JPEG (old-style)',
              7: 'JPEG',
              8: 'Adobe Deflate',
              9: 'JBIG B&W',
              10: 'JBIG Color',
              32766: 'Next',
              32769: 'Epson ERF Compressed',
              32771: 'CCIRLEW',
              32773: 'PackBits',
              32809: 'Thunderscan',
              32895: 'IT8CTPAD',
              32896: 'IT8LW',
              32897: 'IT8MP',
              32898: 'IT8BL',
              32908: 'PixarFilm',
              32909: 'PixarLog',
              32946: 'Deflate',
              32947: 'DCS',
              34661: 'JBIG',
              34676: 'SGILog',
              34677: 'SGILog24',
              34712: 'JPEG 2000',
              34713: 'Nikon NEF Compressed',
              65000: 'Kodak DCR Compressed',
              65535: 'Pentax PEF Compressed',}),
    0x0201: ('PreviewImageStart', ),
    0x0202: ('PreviewImageLength', ),
    0x0213: ('PreviewYCbCrPositioning',
             {1: 'Centered',
              2: 'Co-sited'}),
    0x0010: ('DataDump', ),
    }
# Older "type 1" Nikon (E99x-era) MakerNote tags.
# Each entry maps tag id -> (tag name[, raw-value -> string lookup dict]).
MAKERNOTE_NIKON_OLDER_TAGS = {
    0x0003: ('Quality',
             {1: 'VGA Basic',
              2: 'VGA Normal',
              3: 'VGA Fine',
              4: 'SXGA Basic',
              5: 'SXGA Normal',
              6: 'SXGA Fine'}),
    0x0004: ('ColorMode',
             {1: 'Color',
              2: 'Monochrome'}),
    0x0005: ('ImageAdjustment',
             {0: 'Normal',
              1: 'Bright+',
              2: 'Bright-',
              3: 'Contrast+',
              4: 'Contrast-'}),
    0x0006: ('CCDSpeed',
             {0: 'ISO 80',
              2: 'ISO 160',
              4: 'ISO 320',
              5: 'ISO 100'}),
    0x0007: ('WhiteBalance',
             {0: 'Auto',
              1: 'Preset',
              2: 'Daylight',
              3: 'Incandescent',
              4: 'Fluorescent',
              5: 'Cloudy',
              6: 'Speed Light'}),
    }
# decode Olympus SpecialMode tag in MakerNote
def olympus_special_mode(v):
    """Decode the Olympus SpecialMode MakerNote tag.

    v -- list of three raw values: mode, shot number in sequence, and
    panorama direction.  Returns a printable string, or the raw list
    unchanged when either the mode or the direction is unrecognized.
    """
    modes = {
        0: 'Normal',
        1: 'Unknown',
        2: 'Fast',
        3: 'Panorama',
    }
    directions = {
        0: 'Non-panoramic',
        1: 'Left to right',
        2: 'Right to left',
        3: 'Bottom to top',
        4: 'Top to bottom',
    }
    # Fall back to the raw values for anything outside the known tables.
    if v[0] not in modes or v[2] not in directions:
        return v
    return '%s - sequence %d - %s' % (modes[v[0]], v[1], directions[v[2]])
# Olympus MakerNote tags.  Each entry maps tag id -> (tag name[, decoder]);
# the optional decoder is a lookup dict or a callable applied to the values.
MAKERNOTE_OLYMPUS_TAGS={
    # ah HAH! those sneeeeeaky bastids! this is how they get past the fact
    # that a JPEG thumbnail is not allowed in an uncompressed TIFF file
    0x0100: ('JPEGThumbnail', ),
    0x0200: ('SpecialMode', olympus_special_mode),
    0x0201: ('JPEGQual',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ'}),
    0x0202: ('Macro',
             {0: 'Normal',
              1: 'Macro',
              2: 'SuperMacro'}),
    0x0203: ('BWMode',
             {0: 'Off',
              1: 'On'}),
    0x0204: ('DigitalZoom', ),
    0x0205: ('FocalPlaneDiagonal', ),
    0x0206: ('LensDistortionParams', ),
    0x0207: ('SoftwareRelease', ),
    0x0208: ('PictureInfo', ),
    0x0209: ('CameraID', make_string), # print as string
    0x0F00: ('DataDump', ),
    0x0300: ('PreCaptureFrames', ),
    0x0404: ('SerialNumber', ),
    0x1000: ('ShutterSpeedValue', ),
    0x1001: ('ISOValue', ),
    0x1002: ('ApertureValue', ),
    0x1003: ('BrightnessValue', ),
    # BUGFIX: a duplicate bare 0x1004: ('FlashMode', ) entry used to precede
    # this one; it was dead code since the later literal key silently wins.
    0x1004: ('FlashMode',
             {2: 'On',
              3: 'Off'}),
    0x1005: ('FlashDevice',
             {0: 'None',
              1: 'Internal',
              4: 'External',
              5: 'Internal + External'}),
    0x1006: ('ExposureCompensation', ),
    0x1007: ('SensorTemperature', ),
    0x1008: ('LensTemperature', ),
    0x100b: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1017: ('RedBalance', ),
    0x1018: ('BlueBalance', ),
    0x101a: ('SerialNumber', ),
    0x1023: ('FlashExposureComp', ),
    0x1026: ('ExternalFlashBounce',
             {0: 'No',
              1: 'Yes'}),
    0x1027: ('ExternalFlashZoom', ),
    0x1028: ('ExternalFlashMode', ),
    0x1029: ('Contrast 	int16u',
             {0: 'High',
              1: 'Normal',
              2: 'Low'}),
    0x102a: ('SharpnessFactor', ),
    0x102b: ('ColorControl', ),
    0x102c: ('ValidBits', ),
    0x102d: ('CoringFilter', ),
    0x102e: ('OlympusImageWidth', ),
    0x102f: ('OlympusImageHeight', ),
    0x1034: ('CompressionRatio', ),
    0x1035: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x1036: ('PreviewImageStart', ),
    0x1037: ('PreviewImageLength', ),
    0x1039: ('CCDScanMode',
             {0: 'Interlaced',
              1: 'Progressive'}),
    0x103a: ('NoiseReduction',
             {0: 'Off',
              1: 'On'}),
    0x103b: ('InfinityLensStep', ),
    0x103c: ('NearLensStep', ),
    # TODO - these need extra definitions
    # http://search.cpan.org/src/EXIFTOOL/Image-ExifTool-6.90/html/TagNames/Olympus.html
    0x2010: ('Equipment', ),
    0x2020: ('CameraSettings', ),
    0x2030: ('RawDevelopment', ),
    0x2040: ('ImageProcessing', ),
    0x2050: ('FocusInfo', ),
    0x3000: ('RawInfo ', ),
    }
# 0x2020 CameraSettings
# Sub-IFD tags for the Olympus CameraSettings tag (0x2020).
# Each entry maps tag id -> (tag name[, raw-value -> string lookup dict]).
MAKERNOTE_OLYMPUS_TAG_0x2020={
    0x0100: ('PreviewImageValid',
             {0: 'No',
              1: 'Yes'}),
    0x0101: ('PreviewImageStart', ),
    0x0102: ('PreviewImageLength', ),
    0x0200: ('ExposureMode',
             {1: 'Manual',
              2: 'Program',
              3: 'Aperture-priority AE',
              4: 'Shutter speed priority AE',
              5: 'Program-shift'}),
    0x0201: ('AELock',
             {0: 'Off',
              1: 'On'}),
    0x0202: ('MeteringMode',
             {2: 'Center Weighted',
              3: 'Spot',
              5: 'ESP',
              261: 'Pattern+AF',
              515: 'Spot+Highlight control',
              1027: 'Spot+Shadow control'}),
    0x0300: ('MacroMode',
             {0: 'Off',
              1: 'On'}),
    0x0301: ('FocusMode',
             {0: 'Single AF',
              1: 'Sequential shooting AF',
              2: 'Continuous AF',
              3: 'Multi AF',
              10: 'MF'}),
    0x0302: ('FocusProcess',
             {0: 'AF Not Used',
              1: 'AF Used'}),
    0x0303: ('AFSearch',
             {0: 'Not Ready',
              1: 'Ready'}),
    0x0304: ('AFAreas', ),
    0x0401: ('FlashExposureCompensation', ),
    0x0500: ('WhiteBalance2',
             {0: 'Auto',
              16: '7500K (Fine Weather with Shade)',
              17: '6000K (Cloudy)',
              18: '5300K (Fine Weather)',
              20: '3000K (Tungsten light)',
              21: '3600K (Tungsten light-like)',
              33: '6600K (Daylight fluorescent)',
              34: '4500K (Neutral white fluorescent)',
              35: '4000K (Cool white fluorescent)',
              48: '3600K (Tungsten light-like)',
              256: 'Custom WB 1',
              257: 'Custom WB 2',
              258: 'Custom WB 3',
              259: 'Custom WB 4',
              512: 'Custom WB 5400K',
              513: 'Custom WB 2900K',
              514: 'Custom WB 8000K', }),
    0x0501: ('WhiteBalanceTemperature', ),
    0x0502: ('WhiteBalanceBracket', ),
    0x0503: ('CustomSaturation', ), # (3 numbers: 1. CS Value, 2. Min, 3. Max)
    0x0504: ('ModifiedSaturation',
             {0: 'Off',
              1: 'CM1 (Red Enhance)',
              2: 'CM2 (Green Enhance)',
              3: 'CM3 (Blue Enhance)',
              4: 'CM4 (Skin Tones)'}),
    0x0505: ('ContrastSetting', ), # (3 numbers: 1. Contrast, 2. Min, 3. Max)
    0x0506: ('SharpnessSetting', ), # (3 numbers: 1. Sharpness, 2. Min, 3. Max)
    0x0507: ('ColorSpace',
             {0: 'sRGB',
              1: 'Adobe RGB',
              2: 'Pro Photo RGB'}),
    0x0509: ('SceneMode',
             {0: 'Standard',
              6: 'Auto',
              7: 'Sport',
              8: 'Portrait',
              9: 'Landscape+Portrait',
              10: 'Landscape',
              11: 'Night scene',
              13: 'Panorama',
              16: 'Landscape+Portrait',
              17: 'Night+Portrait',
              19: 'Fireworks',
              20: 'Sunset',
              22: 'Macro',
              25: 'Documents',
              26: 'Museum',
              28: 'Beach&Snow',
              30: 'Candle',
              35: 'Underwater Wide1',
              36: 'Underwater Macro',
              39: 'High Key',
              40: 'Digital Image Stabilization',
              44: 'Underwater Wide2',
              45: 'Low Key',
              46: 'Children',
              48: 'Nature Macro'}),
    0x050a: ('NoiseReduction',
             {0: 'Off',
              1: 'Noise Reduction',
              2: 'Noise Filter',
              3: 'Noise Reduction + Noise Filter',
              4: 'Noise Filter (ISO Boost)',
              5: 'Noise Reduction + Noise Filter (ISO Boost)'}),
    0x050b: ('DistortionCorrection',
             {0: 'Off',
              1: 'On'}),
    0x050c: ('ShadingCompensation',
             {0: 'Off',
              1: 'On'}),
    0x050d: ('CompressionFactor', ),
    0x050f: ('Gradation',
             {'-1 -1 1': 'Low Key',
              '0 -1 1': 'Normal',
              '1 -1 1': 'High Key'}),
    0x0520: ('PictureMode',
             {1: 'Vivid',
              2: 'Natural',
              3: 'Muted',
              256: 'Monotone',
              512: 'Sepia'}),
    0x0521: ('PictureModeSaturation', ),
    0x0522: ('PictureModeHue?', ),
    0x0523: ('PictureModeContrast', ),
    0x0524: ('PictureModeSharpness', ),
    0x0525: ('PictureModeBWFilter',
             {0: 'n/a',
              1: 'Neutral',
              2: 'Yellow',
              3: 'Orange',
              4: 'Red',
              5: 'Green'}),
    0x0526: ('PictureModeTone',
             {0: 'n/a',
              1: 'Neutral',
              2: 'Sepia',
              3: 'Blue',
              4: 'Purple',
              5: 'Green'}),
    0x0600: ('Sequence', ), # 2 or 3 numbers: 1. Mode, 2. Shot number, 3. Mode bits
    0x0601: ('PanoramaMode', ), # (2 numbers: 1. Mode, 2. Shot number)
    0x0603: ('ImageQuality2',
             {1: 'SQ',
              2: 'HQ',
              3: 'SHQ',
              4: 'RAW'}),
    0x0901: ('ManometerReading', ),
    }
# Casio MakerNote tags.
# Each entry maps tag id -> (tag name[, raw-value -> string lookup dict]).
MAKERNOTE_CASIO_TAGS={
    0x0001: ('RecordingMode',
             {1: 'Single Shutter',
              2: 'Panorama',
              3: 'Night Scene',
              4: 'Portrait',
              5: 'Landscape'}),
    0x0002: ('Quality',
             {1: 'Economy',
              2: 'Normal',
              3: 'Fine'}),
    0x0003: ('FocusingMode',
             {2: 'Macro',
              3: 'Auto Focus',
              4: 'Manual Focus',
              5: 'Infinity'}),
    0x0004: ('FlashMode',
             {1: 'Auto',
              2: 'On',
              3: 'Off',
              4: 'Red Eye Reduction'}),
    0x0005: ('FlashIntensity',
             {11: 'Weak',
              13: 'Normal',
              15: 'Strong'}),
    0x0006: ('Object Distance', ),
    0x0007: ('WhiteBalance',
             {1: 'Auto',
              2: 'Tungsten',
              3: 'Daylight',
              4: 'Fluorescent',
              5: 'Shade',
              129: 'Manual'}),
    0x000B: ('Sharpness',
             {0: 'Normal',
              1: 'Soft',
              2: 'Hard'}),
    0x000C: ('Contrast',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x000D: ('Saturation',
             {0: 'Normal',
              1: 'Low',
              2: 'High'}),
    0x0014: ('CCDSpeed',
             {64: 'Normal',
              80: 'Normal',
              100: 'High',
              125: '+1.0',
              244: '+3.0',
              250: '+2.0'}),
    }
# Fujifilm MakerNote tags.  Each entry maps tag id -> (tag name[, decoder]);
# the decoder is a lookup dict or a callable applied to the raw values.
MAKERNOTE_FUJIFILM_TAGS={
    0x0000: ('NoteVersion', make_string),
    0x1000: ('Quality', ),
    0x1001: ('Sharpness',
             {1: 'Soft',
              2: 'Soft',
              3: 'Normal',
              4: 'Hard',
              5: 'Hard'}),
    0x1002: ('WhiteBalance',
             {0: 'Auto',
              256: 'Daylight',
              512: 'Cloudy',
              768: 'DaylightColor-Fluorescent',
              769: 'DaywhiteColor-Fluorescent',
              770: 'White-Fluorescent',
              1024: 'Incandescent',
              3840: 'Custom'}),
    0x1003: ('Color',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1004: ('Tone',
             {0: 'Normal',
              256: 'High',
              512: 'Low'}),
    0x1010: ('FlashMode',
             {0: 'Auto',
              1: 'On',
              2: 'Off',
              3: 'Red Eye Reduction'}),
    0x1011: ('FlashStrength', ),
    0x1020: ('Macro',
             {0: 'Off',
              1: 'On'}),
    0x1021: ('FocusMode',
             {0: 'Auto',
              1: 'Manual'}),
    0x1030: ('SlowSync',
             {0: 'Off',
              1: 'On'}),
    0x1031: ('PictureMode',
             {0: 'Auto',
              1: 'Portrait',
              2: 'Landscape',
              4: 'Sports',
              5: 'Night',
              6: 'Program AE',
              256: 'Aperture Priority AE',
              512: 'Shutter Priority AE',
              768: 'Manual Exposure'}),
    0x1100: ('MotorOrBracket',
             {0: 'Off',
              1: 'On'}),
    0x1300: ('BlurWarning',
             {0: 'Off',
              1: 'On'}),
    0x1301: ('FocusWarning',
             {0: 'Off',
              1: 'On'}),
    0x1302: ('AEWarning',
             {0: 'Off',
              1: 'On'}),
    }
# Canon MakerNote tags that are printed as plain values (no lookup table),
# so each entry is a one-element (name, ) tuple.
MAKERNOTE_CANON_TAGS = dict(
    (tag, (name, )) for tag, name in (
        (0x0006, 'ImageType'),
        (0x0007, 'FirmwareVersion'),
        (0x0008, 'ImageNumber'),
        (0x0009, 'OwnerName'),
    )
)
# this is in element offset, name, optional value dictionary format
# Canon MakerNote tag 0x0001 ("CameraSettings") is an array of shorts;
# this maps each element's OFFSET within the array -> (name[, lookup dict]).
# Decoded by EXIF_header.canon_decode_tag().
MAKERNOTE_CANON_TAG_0x001 = {
    1: ('Macromode',
        {1: 'Macro',
         2: 'Normal'}),
    2: ('SelfTimer', ),
    3: ('Quality',
        {2: 'Normal',
         3: 'Fine',
         5: 'Superfine'}),
    4: ('FlashMode',
        {0: 'Flash Not Fired',
         1: 'Auto',
         2: 'On',
         3: 'Red-Eye Reduction',
         4: 'Slow Synchro',
         5: 'Auto + Red-Eye Reduction',
         6: 'On + Red-Eye Reduction',
         16: 'external flash'}),
    5: ('ContinuousDriveMode',
        {0: 'Single Or Timer',
         1: 'Continuous'}),
    7: ('FocusMode',
        {0: 'One-Shot',
         1: 'AI Servo',
         2: 'AI Focus',
         3: 'MF',
         4: 'Single',
         5: 'Continuous',
         6: 'MF'}),
    10: ('ImageSize',
         {0: 'Large',
          1: 'Medium',
          2: 'Small'}),
    11: ('EasyShootingMode',
         {0: 'Full Auto',
          1: 'Manual',
          2: 'Landscape',
          3: 'Fast Shutter',
          4: 'Slow Shutter',
          5: 'Night',
          6: 'B&W',
          7: 'Sepia',
          8: 'Portrait',
          9: 'Sports',
          10: 'Macro/Close-Up',
          11: 'Pan Focus'}),
    12: ('DigitalZoom',
         {0: 'None',
          1: '2x',
          2: '4x'}),
    13: ('Contrast',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    14: ('Saturation',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    15: ('Sharpness',
         {0xFFFF: 'Low',
          0: 'Normal',
          1: 'High'}),
    16: ('ISO',
         {0: 'See ISOSpeedRatings Tag',
          15: 'Auto',
          16: '50',
          17: '100',
          18: '200',
          19: '400'}),
    17: ('MeteringMode',
         {3: 'Evaluative',
          4: 'Partial',
          5: 'Center-weighted'}),
    18: ('FocusType',
         {0: 'Manual',
          1: 'Auto',
          3: 'Close-Up (Macro)',
          8: 'Locked (Pan Mode)'}),
    19: ('AFPointSelected',
         {0x3000: 'None (MF)',
          0x3001: 'Auto-Selected',
          0x3002: 'Right',
          0x3003: 'Center',
          0x3004: 'Left'}),
    20: ('ExposureMode',
         {0: 'Easy Shooting',
          1: 'Program',
          2: 'Tv-priority',
          3: 'Av-priority',
          4: 'Manual',
          5: 'A-DEP'}),
    23: ('LongFocalLengthOfLensInFocalUnits', ),
    24: ('ShortFocalLengthOfLensInFocalUnits', ),
    25: ('FocalUnitsPerMM', ),
    28: ('FlashActivity',
         {0: 'Did Not Fire',
          1: 'Fired'}),
    29: ('FlashDetails',
         {14: 'External E-TTL',
          13: 'Internal Flash',
          11: 'FP Sync Used',
          7: '2nd("Rear")-Curtain Sync Used',
          4: 'FP Sync Enabled'}),
    32: ('FocusMode',
         {0: 'Single',
          1: 'Continuous'}),
    }
# Canon MakerNote tag 0x0004 ("ShotInfo"): element OFFSET within the value
# array -> (name[, lookup dict]).  Decoded by EXIF_header.canon_decode_tag().
MAKERNOTE_CANON_TAG_0x004 = {
    7: ('WhiteBalance',
        {0: 'Auto',
         1: 'Sunny',
         2: 'Cloudy',
         3: 'Tungsten',
         4: 'Fluorescent',
         5: 'Flash',
         6: 'Custom'}),
    9: ('SequenceNumber', ),
    14: ('AFPointUsed', ),
    15: ('FlashBias',
         {0xFFC0: '-2 EV',
          0xFFCC: '-1.67 EV',
          0xFFD0: '-1.50 EV',
          0xFFD4: '-1.33 EV',
          0xFFE0: '-1 EV',
          0xFFEC: '-0.67 EV',
          0xFFF0: '-0.50 EV',
          0xFFF4: '-0.33 EV',
          0x0000: '0 EV',
          0x000C: '0.33 EV',
          0x0010: '0.50 EV',
          0x0014: '0.67 EV',
          0x0020: '1 EV',
          0x002C: '1.33 EV',
          0x0030: '1.50 EV',
          0x0034: '1.67 EV',
          0x0040: '2 EV'}),
    19: ('SubjectDistance', ),
    }
# extract multibyte integer in Motorola format (big endian)
def s2n_motorola(str):
    """Convert a Motorola-ordered (big-endian) byte string to an unsigned int."""
    value = 0
    for char in str:
        # Most significant byte arrives first: shift and append.
        value = (value << 8) | ord(char)
    return value
# extract multibyte integer in Intel format (little endian)
def s2n_intel(str):
    """Convert an Intel-ordered (little-endian) byte string to an unsigned int."""
    x = 0
    # Bit position of the current byte.  A plain int is enough: Python 2
    # auto-promotes to long on overflow, so the old 0L literal (which also
    # breaks Python 3 parsing) was unnecessary.
    y = 0
    for c in str:
        # Least significant byte arrives first.
        x = x | (ord(c) << y)
        y = y + 8
    return x
# greatest common divisor, used by the Ratio class below to reduce a
# fraction to lowest terms for printing
def gcd(a, b):
    """Return the greatest common divisor of a and b (iterative Euclid)."""
    while b:
        a, b = b, a % b
    return a
class Ratio:
    """A rational number (num/den) as found in EXIF RATIONAL fields.

    The fraction reduces itself to lowest terms when printed.
    """
    def __init__(self, num, den):
        # numerator / denominator, kept as integers
        self.num = num
        self.den = den

    def __repr__(self):
        # Reduce before printing so e.g. 2/4 renders as "1/2".
        self.reduce()
        if self.den == 1:
            return str(self.num)
        return '%d/%d' % (self.num, self.den)

    def reduce(self):
        """Reduce the fraction to lowest terms in place."""
        div = gcd(self.num, self.den)
        if div > 1:
            # Floor division: both operands are exact multiples of div, so
            # the result is exact — and it stays an int even under true
            # division semantics, where the old '/' produced floats.
            self.num = self.num // div
            self.den = self.den // div
# for ease of dealing with tags
class IFD_Tag:
    """A single IFD entry (tag) plus a human-readable rendering of its value."""

    def __init__(self, printable, tag, field_type, values, field_offset,
                 field_length):
        self.printable = printable        # printable version of the data
        self.tag = tag                    # numeric tag ID
        self.field_type = field_type      # index into FIELD_TYPES
        self.field_offset = field_offset  # byte offset of the field from the IFD start
        self.field_length = field_length  # length of the data field in bytes
        self.values = values              # either a string or a list of data items

    def __str__(self):
        """Return the human-readable form of the value."""
        return self.printable

    def __repr__(self):
        """Return tag id, field-type name, value and offset for debugging."""
        return '(0x%04X) %s=%s @ %d' % (self.tag,
                                        FIELD_TYPES[self.field_type][2],
                                        self.printable,
                                        self.field_offset)
# class that handles an EXIF header
class EXIF_header:
    """Handles an EXIF header: walks its IFDs and collects IFD_Tag objects.

    All tags found are accumulated in self.tags, keyed as
    '<IFD name> <tag name>'.  Offsets inside the EXIF data are resolved
    relative to self.offset (the position of the EXIF header in the file).
    """
    def __init__(self, file, endian, offset, fake_exif, strict, debug=0):
        self.file = file            # open file object positioned anywhere
        self.endian = endian        # 'I' (Intel/little) or 'M' (Motorola/big)
        self.offset = offset        # file offset of the start of the EXIF data
        self.fake_exif = fake_exif  # nonzero if a fake EXIF beginning was inserted
        self.strict = strict        # raise on unknown field types instead of skipping
        self.debug = debug
        self.tags = {}              # 'IFD-name TagName' -> IFD_Tag
    # convert slice to integer, based on sign and endian flags
    # usually this offset is assumed to be relative to the beginning of the
    # start of the EXIF information. For some cameras that use relative tags,
    # this offset may be relative to some other starting point.
    def s2n(self, offset, length, signed=0):
        """Read `length` bytes at `offset` and return them as an integer."""
        self.file.seek(self.offset+offset)
        slice=self.file.read(length)
        if self.endian == 'I':
            val=s2n_intel(slice)
        else:
            val=s2n_motorola(slice)
        # Sign extension ?
        if signed:
            msb=1L << (8*length-1)
            if val & msb:
                val=val-(msb << 1)
        return val
    # convert offset to string
    def n2s(self, offset, length):
        """Encode integer `offset` as a `length`-byte string in our endianness."""
        s = ''
        for dummy in range(length):
            if self.endian == 'I':
                s = s + chr(offset & 0xFF)
            else:
                s = chr(offset & 0xFF) + s
            offset = offset >> 8
        return s
    # return first IFD
    def first_IFD(self):
        """Return the offset of the first IFD (stored 4 bytes into the header)."""
        return self.s2n(4, 4)
    # return pointer to next IFD
    def next_IFD(self, ifd):
        """Return the offset of the IFD following the one at `ifd` (0 if none)."""
        entries=self.s2n(ifd, 2)
        return self.s2n(ifd+2+12*entries, 4)
    # return list of IFDs in header
    def list_IFDs(self):
        """Return the offsets of all top-level IFDs by following next pointers."""
        i=self.first_IFD()
        a=[]
        while i:
            a.append(i)
            i=self.next_IFD(i)
        return a
    # return list of entries in this IFD
    def dump_IFD(self, ifd, ifd_name, dict=EXIF_TAGS, relative=0, stop_tags=()):
        """Walk the IFD at offset `ifd` and store its entries in self.tags.

        ifd_name -- prefix used for the self.tags keys
        dict -- tag-id -> (name[, decoder]) table used to name/decode entries
        relative -- nonzero for MakerNotes whose value offsets are relative
                    to the note itself (Nikon type 3) rather than the header
        stop_tags -- stop once all of these tag names have been seen
        """
        entries=self.s2n(ifd, 2)
        stop_tags_length = len(stop_tags)
        stop_tags_encountered = 0
        for i in range(entries):
            # entry is index of start of this IFD in the file
            entry = ifd + 2 + 12 * i
            tag = self.s2n(entry, 2)
            # get tag name early to avoid errors, help debug
            tag_entry = dict.get(tag)
            if tag_entry:
                tag_name = tag_entry[0]
            else:
                tag_name = 'Tag 0x%04X' % tag
            # ignore certain tags for faster processing
            if not (not detailed and tag in IGNORE_TAGS):
                field_type = self.s2n(entry + 2, 2)
                # unknown field type
                if not 0 < field_type < len(FIELD_TYPES):
                    if not self.strict:
                        continue
                    else:
                        raise ValueError('unknown type %d in tag 0x%04X' % (field_type, tag))
                typelen = FIELD_TYPES[field_type][0]
                count = self.s2n(entry + 4, 4)
                # Adjust for tag id/type/count (2+2+4 bytes)
                # Now we point at either the data or the 2nd level offset
                offset = entry + 8
                # If the value fits in 4 bytes, it is inlined, else we
                # need to jump ahead again.
                if count * typelen > 4:
                    # offset is not the value; it's a pointer to the value
                    # if relative we set things up so s2n will seek to the right
                    # place when it adds self.offset.  Note that this 'relative'
                    # is for the Nikon type 3 makernote.  Other cameras may use
                    # other relative offsets, which would have to be computed here
                    # slightly differently.
                    if relative:
                        tmp_offset = self.s2n(offset, 4)
                        offset = tmp_offset + ifd - 8
                        if self.fake_exif:
                            offset = offset + 18
                    else:
                        offset = self.s2n(offset, 4)
                field_offset = offset
                if field_type == 2:
                    # special case: null-terminated ASCII string
                    # XXX investigate
                    # sometimes gets too big to fit in int value
                    if count != 0 and count < (2**31):
                        self.file.seek(self.offset + offset)
                        values = self.file.read(count)
                        #print values
                        # Drop any garbage after a null.
                        values = values.split('\x00', 1)[0]
                    else:
                        values = ''
                else:
                    values = []
                    signed = (field_type in [6, 8, 9, 10])
                    # XXX investigate
                    # some entries get too big to handle could be malformed
                    # file or problem with self.s2n
                    if count < 1000:
                        for dummy in range(count):
                            if field_type in (5, 10):
                                # a ratio
                                value = Ratio(self.s2n(offset, 4, signed),
                                              self.s2n(offset + 4, 4, signed))
                            else:
                                value = self.s2n(offset, typelen, signed)
                            values.append(value)
                            offset = offset + typelen
                    # The test above causes problems with tags that are
                    # supposed to have long values! Fix up one important case.
                    elif tag_name == 'MakerNote' :
                        for dummy in range(count):
                            value = self.s2n(offset, typelen, signed)
                            values.append(value)
                            offset = offset + typelen
                    #else :
                    #    print "Warning: dropping large tag:", tag, tag_name
                # now 'values' is either a string or an array
                if count == 1 and field_type != 2:
                    printable=str(values[0])
                elif count > 50 and len(values) > 20 :
                    printable=str( values[0:20] )[0:-1] + ", ... ]"
                else:
                    printable=str(values)
                # compute printable version of values
                if tag_entry:
                    if len(tag_entry) != 1:
                        # optional 2nd tag element is present
                        if callable(tag_entry[1]):
                            # call mapping function
                            printable = tag_entry[1](values)
                        else:
                            printable = ''
                            for i in values:
                                # use lookup table for this tag
                                printable += tag_entry[1].get(i, repr(i))
                self.tags[ifd_name + ' ' + tag_name] = IFD_Tag(printable, tag,
                                                               field_type,
                                                               values, field_offset,
                                                               count * typelen)
                if self.debug:
                    print ' debug: %s: %s' % (tag_name,
                               repr(self.tags[ifd_name + ' ' + tag_name]))
            if tag_name in stop_tags:
                stop_tags_encountered += 1
                if stop_tags_encountered >= stop_tags_length:
                    break
    # extract uncompressed TIFF thumbnail (like pulling teeth)
    # we take advantage of the pre-existing layout in the thumbnail IFD as
    # much as possible
    def extract_TIFF_thumbnail(self, thumb_ifd):
        """Rebuild the uncompressed TIFF thumbnail into self.tags['TIFFThumbnail']."""
        entries = self.s2n(thumb_ifd, 2)
        # this is header plus offset to IFD ...
        if self.endian == 'M':
            tiff = 'MM\x00*\x00\x00\x00\x08'
        else:
            tiff = 'II*\x00\x08\x00\x00\x00'
        # ... plus thumbnail IFD data plus a null "next IFD" pointer
        self.file.seek(self.offset+thumb_ifd)
        tiff += self.file.read(entries*12+2)+'\x00\x00\x00\x00'
        # fix up large value offset pointers into data area
        for i in range(entries):
            entry = thumb_ifd + 2 + 12 * i
            tag = self.s2n(entry, 2)
            field_type = self.s2n(entry+2, 2)
            typelen = FIELD_TYPES[field_type][0]
            count = self.s2n(entry+4, 4)
            oldoff = self.s2n(entry+8, 4)
            # start of the 4-byte pointer area in entry
            ptr = i * 12 + 18
            # remember strip offsets location
            if tag == 0x0111:
                strip_off = ptr
                strip_len = count * typelen
            # is it in the data area?
            if count * typelen > 4:
                # update offset pointer (nasty "strings are immutable" crap)
                # should be able to say "tiff[ptr:ptr+4]=newoff"
                newoff = len(tiff)
                tiff = tiff[:ptr] + self.n2s(newoff, 4) + tiff[ptr+4:]
                # remember strip offsets location
                if tag == 0x0111:
                    strip_off = newoff
                    strip_len = 4
                # get original data and store it
                self.file.seek(self.offset + oldoff)
                tiff += self.file.read(count * typelen)
        # add pixel strips and update strip offset info
        old_offsets = self.tags['Thumbnail StripOffsets'].values
        old_counts = self.tags['Thumbnail StripByteCounts'].values
        for i in range(len(old_offsets)):
            # update offset pointer (more nasty "strings are immutable" crap)
            offset = self.n2s(len(tiff), strip_len)
            tiff = tiff[:strip_off] + offset + tiff[strip_off + strip_len:]
            strip_off += strip_len
            # add pixel strip to end
            self.file.seek(self.offset + old_offsets[i])
            tiff += self.file.read(old_counts[i])
        self.tags['TIFFThumbnail'] = tiff
    # decode all the camera-specific MakerNote formats
    # Note is the data that comprises this MakerNote.  The MakerNote will
    # likely have pointers in it that point to other parts of the file. We'll
    # use self.offset as the starting point for most of those pointers, since
    # they are relative to the beginning of the file.
    #
    # If the MakerNote is in a newer format, it may use relative addressing
    # within the MakerNote. In that case we'll use relative addresses for the
    # pointers.
    #
    # As an aside: it's not just to be annoying that the manufacturers use
    # relative offsets.  It's so that if the makernote has to be moved by the
    # picture software all of the offsets don't have to be adjusted.  Overall,
    # this is probably the right strategy for makernotes, though the spec is
    # ambiguous.  (The spec does not appear to imagine that makernotes would
    # follow EXIF format internally.  Once they did, it's ambiguous whether
    # the offsets should be from the header at the start of all the EXIF info,
    # or from the header at the start of the makernote.)
    def decode_maker_note(self):
        """Dispatch the raw MakerNote to the right per-manufacturer decoder."""
        note = self.tags['EXIF MakerNote']
        # Some apps use MakerNote tags but do not use a format for which we
        # have a description, so just do a raw dump for these.
        #if self.tags.has_key('Image Make'):
        make = self.tags['Image Make'].printable
        #else:
        #    make = ''
        # model = self.tags['Image Model'].printable # unused
        # Nikon
        # The maker note usually starts with the word Nikon, followed by the
        # type of the makernote (1 or 2, as a short).  If the word Nikon is
        # not at the start of the makernote, it's probably type 2, since some
        # cameras work that way.
        if 'NIKON' in make:
            if note.values[0:7] == [78, 105, 107, 111, 110, 0, 1]:
                if self.debug:
                    print "Looks like a type 1 Nikon MakerNote."
                self.dump_IFD(note.field_offset+8, 'MakerNote',
                              dict=MAKERNOTE_NIKON_OLDER_TAGS)
            elif note.values[0:7] == [78, 105, 107, 111, 110, 0, 2]:
                if self.debug:
                    print "Looks like a labeled type 2 Nikon MakerNote"
                if note.values[12:14] != [0, 42] and note.values[12:14] != [42L, 0L]:
                    raise ValueError("Missing marker tag '42' in MakerNote.")
                # skip the Makernote label and the TIFF header
                self.dump_IFD(note.field_offset+10+8, 'MakerNote',
                              dict=MAKERNOTE_NIKON_NEWER_TAGS, relative=1)
            else:
                # E99x or D1
                if self.debug:
                    print "Looks like an unlabeled type 2 Nikon MakerNote"
                self.dump_IFD(note.field_offset, 'MakerNote',
                              dict=MAKERNOTE_NIKON_NEWER_TAGS)
            return
        # Olympus
        if make.startswith('OLYMPUS'):
            self.dump_IFD(note.field_offset+8, 'MakerNote',
                          dict=MAKERNOTE_OLYMPUS_TAGS)
            # XXX TODO
            #for i in (('MakerNote Tag 0x2020', MAKERNOTE_OLYMPUS_TAG_0x2020),):
            #    self.decode_olympus_tag(self.tags[i[0]].values, i[1])
            #return
        # Casio
        if 'CASIO' in make or 'Casio' in make:
            self.dump_IFD(note.field_offset, 'MakerNote',
                          dict=MAKERNOTE_CASIO_TAGS)
            return
        # Fujifilm
        if make == 'FUJIFILM':
            # bug: everything else is "Motorola" endian, but the MakerNote
            # is "Intel" endian
            endian = self.endian
            self.endian = 'I'
            # bug: IFD offsets are from beginning of MakerNote, not
            # beginning of file header
            offset = self.offset
            self.offset += note.field_offset
            # process note with bogus values (note is actually at offset 12)
            self.dump_IFD(12, 'MakerNote', dict=MAKERNOTE_FUJIFILM_TAGS)
            # reset to correct values
            self.endian = endian
            self.offset = offset
            return
        # Canon
        if make == 'Canon':
            self.dump_IFD(note.field_offset, 'MakerNote',
                          dict=MAKERNOTE_CANON_TAGS)
            for i in (('MakerNote Tag 0x0001', MAKERNOTE_CANON_TAG_0x001),
                      ('MakerNote Tag 0x0004', MAKERNOTE_CANON_TAG_0x004)):
                self.canon_decode_tag(self.tags[i[0]].values, i[1])
            return
    # XXX TODO decode Olympus MakerNote tag based on offset within tag
    def olympus_decode_tag(self, value, dict):
        """Placeholder: Olympus sub-IFD element decoding is not implemented yet."""
        pass
    # decode Canon MakerNote tag based on offset within tag
    # see http://www.burren.cx/david/canon.html by David Burren
    def canon_decode_tag(self, value, dict):
        """Decode one Canon MakerNote array tag element-by-element.

        value -- the raw array of shorts; dict maps element offsets to
        (name[, lookup dict]) entries.  Element 0 is the array length and
        is skipped.
        """
        for i in range(1, len(value)):
            x=dict.get(i, ('Unknown', ))
            if self.debug:
                print i, x
            name=x[0]
            if len(x) > 1:
                val=x[1].get(value[i], 'Unknown')
            else:
                val=value[i]
            # it's not a real IFD Tag but we fake one to make everybody
            # happy. this will have a "proprietary" type
            self.tags['MakerNote '+name]=IFD_Tag(str(val), None, 0, None,
                                                 None, None)
# process an image file (expects an open file object)
# this is the function that has to deal with all the arbitrary nasty bits
# of the EXIF standard
def process_file(f, stop_tags=(), details=True, strict=False, debug=False):
# yah it's cheesy...
global detailed
detailed = details
# by default do not fake an EXIF beginning
fake_exif = 0
# determine whether it's a JPEG or TIFF
data = f.read(12)
if data[0:4] in ['II*\x00', 'MM\x00*']:
# it's a TIFF file
f.seek(0)
endian = f.read(1)
f.read(1)
offset = 0
elif data[0:2] == '\xFF\xD8':
# it's a JPEG file
while data[2] == '\xFF' and data[6:10] in ('JFIF', 'JFXX', 'OLYM', 'Phot'):
length = ord(data[4])*256+ord(data[5])
f.read(length-8)
# fake an EXIF beginning of file
data = '\xFF\x00'+f.read(10)
fake_exif = 1
if data[2] == '\xFF' and data[6:10] == 'Exif':
# detected EXIF header
offset = f.tell()
endian = f.read(1)
else:
# no EXIF information
return {}
else:
# file format not recognized
return {}
# deal with the EXIF info we found
if debug:
print {'I': 'Intel', 'M': 'Motorola'}[endian], 'format'
hdr = EXIF_header(f, endian, offset, fake_exif, strict, debug)
ifd_list = hdr.list_IFDs()
ctr = 0
for i in ifd_list:
if ctr == 0:
IFD_name = 'Image'
elif ctr == 1:
IFD_name = 'Thumbnail'
thumb_ifd = i
else:
IFD_name = 'IFD %d' % ctr
if debug:
print ' IFD %d (%s) at offset %d:' % (ctr, IFD_name, i)
hdr.dump_IFD(i, IFD_name, stop_tags=stop_tags)
# EXIF IFD
exif_off = hdr.tags.get(IFD_name+' ExifOffset')
if exif_off:
if debug:
print ' EXIF SubIFD at offset %d:' % exif_off.values[0]
hdr.dump_IFD(exif_off.values[0], 'EXIF', stop_tags=stop_tags)
# Interoperability IFD contained in EXIF IFD
intr_off = hdr.tags.get('EXIF SubIFD InteroperabilityOffset')
if intr_off:
if debug:
print ' EXIF Interoperability SubSubIFD at offset %d:' \
% intr_off.values[0]
hdr.dump_IFD(intr_off.values[0], 'EXIF Interoperability',
dict=INTR_TAGS, stop_tags=stop_tags)
# GPS IFD
gps_off = hdr.tags.get(IFD_name+' GPSInfo')
if gps_off:
if debug:
print ' GPS SubIFD at offset %d:' % gps_off.values[0]
hdr.dump_IFD(gps_off.values[0], 'GPS', dict=GPS_TAGS, stop_tags=stop_tags)
ctr += 1
# extract uncompressed TIFF thumbnail
thumb = hdr.tags.get('Thumbnail Compression')
if thumb and thumb.printable == 'Uncompressed TIFF':
hdr.extract_TIFF_thumbnail(thumb_ifd)
# JPEG thumbnail (thankfully the JPEG data is stored as a unit)
thumb_off = hdr.tags.get('Thumbnail JPEGInterchangeFormat')
if thumb_off:
f.seek(offset+thumb_off.values[0])
size = hdr.tags['Thumbnail JPEGInterchangeFormatLength'].values[0]
hdr.tags['JPEGThumbnail'] = f.read(size)
# deal with MakerNote contained in EXIF IFD
# (Some apps use MakerNote tags but do not use a format for which we
# have a description, do not process these).
if 'EXIF MakerNote' in hdr.tags and 'Image Make' in hdr.tags and detailed:
hdr.decode_maker_note()
# Sometimes in a TIFF file, a JPEG thumbnail is hidden in the MakerNote
# since it's not allowed in a uncompressed TIFF IFD
if 'JPEGThumbnail' not in hdr.tags:
thumb_off=hdr.tags.get('MakerNote JPEGThumbnail')
if thumb_off:
f.seek(offset+thumb_off.values[0])
hdr.tags['JPEGThumbnail']=file.read(thumb_off.field_length)
return hdr.tags
# show command line usage
def usage(exit_status):
    """Print command-line usage for EXIF.py to stdout and exit.

    exit_status -- process exit code (0 for --help, 2 for a bad invocation).
    This function never returns (it calls sys.exit).
    """
    msg = 'Usage: EXIF.py [OPTIONS] file1 [file2 ...]\n'
    msg += 'Extract EXIF information from digital camera image files.\n\nOptions:\n'
    msg += '-q --quick   Do not process MakerNotes.\n'
    msg += '-t TAG --stop-tag TAG   Stop processing when this tag is retrieved.\n'
    msg += '-s --strict   Run in strict mode (stop on errors).\n'
    msg += '-d --debug   Run in debug mode (display extra info).\n'
    print msg
    sys.exit(exit_status)
# library test/debug function (dump given files)
if __name__ == '__main__':
    import sys
    import getopt
    # parse command line options/arguments
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hqsdt:v", ["help", "quick", "strict", "debug", "stop-tag="])
    except getopt.GetoptError:
        usage(2)
    if args == []:
        # no files given: show usage and exit with an error status
        usage(2)
    detailed = True
    stop_tags = ()
    debug = False
    strict = False
    for o, a in opts:
        if o in ("-h", "--help"):
            usage(0)
        if o in ("-q", "--quick"):
            detailed = False
        if o in ("-t", "--stop-tag"):
            stop_tags = (a,)
        if o in ("-s", "--strict"):
            strict = True
        if o in ("-d", "--debug"):
            debug = True
    # output info for each file
    for filename in args:
        # NOTE(review): `file` shadows the Python builtin of the same name;
        # renaming it (e.g. img_file) would be safer.
        try:
            file=open(filename, 'rb')
        except:  # bare except keeps the dump going past unreadable files
            print "'%s' is unreadable\n"%filename
            continue
        print filename + ':'
        # get the tags
        data = process_file(file, stop_tags=stop_tags, details=detailed, strict=strict, debug=debug)
        if not data:
            print 'No EXIF information found'
            continue
        # print tags sorted by name, skipping the binary thumbnail blobs
        x=data.keys()
        x.sort()
        for i in x:
            if i in ('JPEGThumbnail', 'TIFFThumbnail'):
                continue
            try:
                print '   %s (%s): %s' % \
                      (i, FIELD_TYPES[data[i].field_type][2], data[i].printable)
            except:
                # tag with an unprintable/unknown field type
                print 'error', i, '"', data[i], '"'
        if 'JPEGThumbnail' in data:
            print 'File has JPEG thumbnail'
        print
| Python |
#!/usr/bin/python
__author__ = "pkolarov@gmail.com"
import dbhash,anydbm
import sys, os, shelve, logging,string
import threading, Queue
import flickr
user = None      # flickr user object, set by reshelf() via flickr.test_login()
uploaded = None  # shelve DB mapping image path <-> flickr photo id, opened in reshelf()
lock = None      # threading.Lock created in reshelf(); guards `uploaded`
# getPhotoIDbyTag below returns the one and only photo for the given tag, or
# None.  This works only if the uploader tool previously tagged every picture
# on Flickr automatically.
# Additionally it DELETES any extra images that carry the same tag!
def getPhotoIDbyTag(tag):
    """Return the single flickr photo tagged with `tag`, or None.

    Searches the logged-in user's photos (up to three attempts).  When more
    than one photo carries the tag, every duplicate beyond the first is
    deleted on flickr so that exactly one photo remains.
    """
    photos = None
    for attempt in range(3):
        try:
            logging.debug(user.id)
            photos = flickr.photos_search(user_id=user.id, auth=all,
                                          tags=tag, tag_mode='any')
            break
        except:
            logging.error("flickr2history: Flickr error while searching ....retrying")
            logging.error(sys.exc_info()[0])
    if not photos:
        logging.debug("flickr2history: No image in Flickr (yet) with tags %s (possibly deleted in Flickr by user)" % tag)
        return None
    logging.debug("flickr2history: Tag=%s found %d" % (tag, len(photos)))
    # More than one match: keep the first photo, delete the duplicates.
    while len(photos) > 1:
        logging.debug("flickr2history :Tag %s matches %d images!" % (tag, len(photos)))
        logging.debug("flickr2history: Removing other images")
        try:
            photos.pop().delete()
        except:
            logging.error("flickr2history: Flickr error while deleting duplicate image")
            logging.error(sys.exc_info()[0])
    return photos[0]
class ReshelfThread (threading.Thread):
    """Worker thread that re-links local images to their flickr photos.

    Drains `imageQueue` of absolute image paths; for every image not yet in
    the shared `uploaded` shelf it looks the photo up on flickr by the
    uploader-generated tag and records the image<->photo-id mapping.
    """
    def __init__(self, threadID, imageDir, imageQueue, historyFile):
        threading.Thread.__init__(self)
        self.threadID = threadID        # numeric id, used only in log messages
        self.imageDir = imageDir        # directory prefix stripped from queued paths
        self.imageQueue = imageQueue    # shared Queue of absolute image paths
        self.historyFile = historyFile  # path of the shelve history DB
    def has_key(self, image):
        """Return True when `image` is already recorded in the shared shelf."""
        global lock
        global uploaded
        with lock:
            return uploaded.has_key(str(image))
    def update(self, image, photo):
        """Record the image<->photo.id mapping (both directions) in the shelf."""
        global lock
        global uploaded
        with lock:
            uploaded[ str(image)] = str(photo.id)
            uploaded[ str(photo.id) ] =str(image)
            # Close and reopen so the new entries are flushed to disk at once.
            uploaded.close();
            uploaded = shelve.open(self.historyFile ) #its better to always reopen this file
    def run(self):
        logging.debug( "Starting ReshelfThread %d " % self.threadID )
        while True:
            try:
                image = self.imageQueue.get_nowait()
                logging.debug( "ReshelfThread %d qSize: %d processing %s" % (self.threadID, self.imageQueue.qsize(), image) )
                image = image[len(self.imageDir):] #remove absolute directory
                if ( not self.has_key(str(image) ) ):
                    #each picture should have one id tag in the folder format with spaces replaced by # and starting with #
                    flickrtag = '#' + image.replace(' ','#')
                    logging.debug(flickrtag)
                    photo = getPhotoIDbyTag(flickrtag)
                    logging.debug(image)
                    if not photo:
                        # not on flickr (yet): nothing to record for this image
                        #uploaded.close() # flush the DB file
                        continue
                    logging.debug("ReshelfThread: Reregistering %s photo in local history file" % image)
                    self.update(image, photo)
            except Queue.Empty:
                # queue drained -> this worker is done
                break
        logging.debug( "Exiting ReshelfThread %d " % self.threadID )
def reshelf(images, imageDir, historyFile, numThreads):
    """Re-register images already present on flickr into the local history shelf.

    Fills a queue with the given image paths, spawns `numThreads`
    ReshelfThread workers to drain it, and waits for them all to finish.
    Returns None early when the flickr login fails.
    """
    logging.debug('flickr2history: Started flickr2history')
    global user
    try:
        user = flickr.test_login()
        logging.debug(user.id)
    except:
        logging.error(sys.exc_info()[0])
        return None
    imageQueue = Queue.Queue()
    for path in images:
        imageQueue.put_nowait(path)
    global uploaded
    global lock
    uploaded = shelve.open(historyFile)  # reopen each run so the shelf is fresh
    lock = threading.Lock()
    workers = [ReshelfThread(idx, imageDir, imageQueue, historyFile)
               for idx in range(numThreads)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    uploaded.close()
    logging.debug('flickr2history: Finished flickr2history')
| Python |
"""xmltramp: Make XML documents easily accessible."""
__version__ = "2.16"
__author__ = "Aaron Swartz"
__credits__ = "Many thanks to pjz, bitsko, and DanC."
__copyright__ = "(C) 2003 Aaron Swartz. GNU GPL 2."
if not hasattr(__builtins__, 'True'): True, False = 1, 0
def isstr(f): return isinstance(f, type('')) or isinstance(f, type(u''))
def islst(f): return isinstance(f, type(())) or isinstance(f, type([]))
empty = {'http://www.w3.org/1999/xhtml': ['img', 'br', 'hr', 'meta', 'link', 'base', 'param', 'input', 'col', 'area']}
def quote(x, elt=True):
    """Escape `x` for XML output.

    Element text (elt=True) containing '<' and longer than 24 characters is
    wrapped in a CDATA section when possible; otherwise the markup-significant
    characters are entity-escaped.  Attribute values (elt=False) additionally
    escape double quotes.
    """
    use_cdata = elt and '<' in x and len(x) > 24 and ']]>' not in x
    if use_cdata:
        return '<![CDATA[' + x + ']]>'
    x = x.replace('&', '&amp;').replace('<', '&lt;').replace(']]>', ']]&gt;')
    if not elt:
        x = x.replace('"', '&quot;')
    return x
class Element:
    """A single XML element: name, attributes, children and namespace prefixes.

    Names may be plain strings or (namespace-uri, localname) tuples.
    Children are reached like attributes (elem.child), items (elem['child'],
    elem[0]) or slices (elem['child':] for all matching children).
    Attributes are reached by calling: elem('key'), elem(key='value').
    """
    def __init__(self, name, attrs=None, children=None, prefixes=None):
        # A (None, localname) tuple means "no namespace": use the bare name.
        if islst(name) and name[0] == None: name = name[1]
        if attrs:
            # Apply the same collapsing to attribute names in no namespace.
            na = {}
            for k in attrs.keys():
                if islst(k) and k[0] == None: na[k[1]] = attrs[k]
                else: na[k] = attrs[k]
            attrs = na
        self._name = name
        self._attrs = attrs or {}
        self._dir = children or []
        # _prefixes maps namespace-uri -> prefix (inverse of the argument).
        prefixes = prefixes or {}
        self._prefixes = dict(zip(prefixes.values(), prefixes.keys()))
        # _dNS is the default namespace (prefix None), applied to bare names.
        if prefixes: self._dNS = prefixes.get(None, None)
        else: self._dNS = None

    def __repr__(self, recursive=0, multiline=0, inprefixes=None):
        """Serialise to XML.

        recursive=0 summarises children as '...'; recursive>0 recurses, and
        multiline additionally indents nested elements with tabs.
        """
        def qname(name, inprefixes):
            # Render a name as prefix:local, or bare local for the default ns.
            if islst(name):
                if inprefixes[name[0]] is not None:
                    return inprefixes[name[0]]+':'+name[1]
                else:
                    return name[1]
            else:
                return name

        def arep(a, inprefixes, addns=1):
            # Render attributes, declaring xmlns for newly seen prefixes.
            out = ''
            for p in self._prefixes.keys():
                if not p in inprefixes.keys():
                    if addns: out += ' xmlns'
                    if addns and self._prefixes[p]: out += ':'+self._prefixes[p]
                    if addns: out += '="'+quote(p, False)+'"'
                    inprefixes[p] = self._prefixes[p]
            for k in a.keys():
                out += ' ' + qname(k, inprefixes)+ '="' + quote(a[k], False) + '"'
            return out

        inprefixes = inprefixes or {u'http://www.w3.org/XML/1998/namespace':'xml'}

        # need to call first to set inprefixes:
        attributes = arep(self._attrs, inprefixes, recursive)
        out = '<' + qname(self._name, inprefixes) + attributes

        # Childless XHTML void elements are emitted self-closing.  (This
        # check indexes _name[0]/_name[1], so it only triggers for tuple
        # names; plain-string names fall through harmlessly.)
        if not self._dir and (self._name[0] in empty.keys()
          and self._name[1] in empty[self._name[0]]):
            out += ' />'
            return out

        out += '>'

        if recursive:
            content = 0
            for x in self._dir:
                if isinstance(x, Element): content = 1
            pad = '\n' + ('\t' * recursive)
            for x in self._dir:
                if multiline and content: out += pad
                if isstr(x): out += quote(x)
                elif isinstance(x, Element):
                    out += x.__repr__(recursive+1, multiline, inprefixes.copy())
                else:
                    # BUG FIX: `raise TypeError, ...` and backtick-repr are
                    # Python-2-only syntax; the call forms behave identically.
                    raise TypeError("I wasn't expecting " + repr(x) + ".")
            if multiline and content: out += '\n' + ('\t' * (recursive-1))
        else:
            if self._dir: out += '...'

        out += '</'+qname(self._name, inprefixes)+'>'
        return out

    def __unicode__(self):
        """Return the concatenated, whitespace-normalised text content."""
        # Python 2 only at runtime: unicode() is the py2 builtin.
        text = ''
        for x in self._dir:
            text += unicode(x)
        return ' '.join(text.split())

    def __str__(self):
        return self.__unicode__().encode('utf-8')

    def __getattr__(self, n):
        """elem.child returns the first child element named `child`."""
        if n[0] == '_': raise AttributeError("Use foo['"+n+"'] to access the child element.")
        if self._dNS: n = (self._dNS, n)
        for x in self._dir:
            if isinstance(x, Element) and x._name == n: return x
        raise AttributeError('No child element named \''+n+"'")

    def __hasattr__(self, n):
        # NOTE(review): __hasattr__ is not a real Python protocol method --
        # hasattr() never calls it.  hasattr() still works on Element because
        # __getattr__ raises AttributeError for missing children.
        for x in self._dir:
            if isinstance(x, Element) and x._name == n: return True
        return False

    def __setattr__(self, n, v):
        """Underscore names are real attributes; others create/replace children."""
        if n[0] == '_': self.__dict__[n] = v
        else: self[n] = v

    def __getitem__(self, n):
        if isinstance(n, type(0)): # d[1] == d._dir[1]
            return self._dir[n]
        elif isinstance(n, slice(0).__class__):
            # numerical slices
            if isinstance(n.start, type(0)): return self._dir[n.start:n.stop]
            # d['foo':] == all <foo>s
            n = n.start
            if self._dNS and not islst(n): n = (self._dNS, n)
            out = []
            for x in self._dir:
                if isinstance(x, Element) and x._name == n: out.append(x)
            return out
        else: # d['foo'] == first <foo>
            if self._dNS and not islst(n): n = (self._dNS, n)
            for x in self._dir:
                if isinstance(x, Element) and x._name == n: return x
            raise KeyError

    def __setitem__(self, n, v):
        if isinstance(n, type(0)): # d[1] replaces the nth child
            self._dir[n] = v
        elif isinstance(n, slice(0).__class__):
            # d['foo':] = ... appends a new (empty) <foo>
            # NOTE(review): the assigned value `v` is ignored on this path.
            n = n.start
            if self._dNS and not islst(n): n = (self._dNS, n)
            nv = Element(n)
            self._dir.append(nv)
        else: # d["foo"] replaces first <foo> and dels rest
            if self._dNS and not islst(n): n = (self._dNS, n)
            nv = Element(n); nv._dir.append(v)
            replaced = False
            todel = []
            for i in range(len(self)):
                # BUG FIX: guard against plain-string children, which have no
                # _name attribute and used to raise AttributeError here.
                if isinstance(self[i], Element) and self[i]._name == n:
                    if replaced:
                        todel.append(i)
                    else:
                        self[i] = nv
                        replaced = True
            if not replaced: self._dir.append(nv)
            # BUG FIX: delete from the end so earlier deletions do not shift
            # the indices still queued for deletion.
            for i in reversed(todel): del self[i]

    def __delitem__(self, n):
        if isinstance(n, type(0)): del self._dir[n]
        elif isinstance(n, slice(0).__class__):
            # delete all <foo>s
            n = n.start
            if self._dNS and not islst(n): n = (self._dNS, n)
            # BUG FIX: iterate in reverse and skip non-Element children --
            # deleting while walking forward over range(len(self)) skipped
            # items and could raise IndexError once the list shrank.
            for i in range(len(self) - 1, -1, -1):
                if isinstance(self[i], Element) and self[i]._name == n: del self[i]
        else:
            # delete the first <foo> only
            # BUG FIX: apply the default namespace like every sibling branch,
            # guard non-Element children, and break only after a real match
            # (the old loop broke unconditionally after the first child).
            if self._dNS and not islst(n): n = (self._dNS, n)
            for i in range(len(self)):
                if isinstance(self[i], Element) and self[i]._name == n:
                    del self[i]
                    break

    def __call__(self, *_pos, **_set):
        """Attribute access: d() -> all attrs, d('k') -> d._attrs['k'],
        d('k', 'v', ...) or d(k=v) sets attributes."""
        if _set:
            for k in _set.keys(): self._attrs[k] = _set[k]
        if len(_pos) > 1:
            for i in range(0, len(_pos), 2):
                self._attrs[_pos[i]] = _pos[i+1]
        # BUG FIX: was `if len(_pos) == 1 is not None:`, a confusing chained
        # comparison that only accidentally evaluated like `len(_pos) == 1`.
        if len(_pos) == 1:
            return self._attrs[_pos[0]]
        if len(_pos) == 0:
            return self._attrs

    def __len__(self): return len(self._dir)
class Namespace:
    """Factory for (uri, localname) qualified-name tuples.

    Both attribute and item access qualify a name with the namespace URI:
    Namespace('http://x').foo == ('http://x', 'foo').
    """
    def __init__(self, uri):
        self.__uri = uri
    def __getattr__(self, localname):
        return (self.__uri, localname)
    def __getitem__(self, localname):
        return (self.__uri, localname)
from xml.sax.handler import EntityResolver, DTDHandler, ContentHandler, ErrorHandler
class Seeder(EntityResolver, DTDHandler, ContentHandler, ErrorHandler):
    """SAX handler that grows an xmltramp Element tree while parsing.

    When parsing finishes, the root Element is left in self.result.
    """
    def __init__(self):
        self.stack = []     # currently-open Elements, outermost first
        self.ch = ''        # character data gathered since the last tag event
        self.prefixes = {}  # namespace prefix -> stack of URIs (prefixes nest)
        ContentHandler.__init__(self)

    def startPrefixMapping(self, prefix, uri):
        # BUG FIX: dict.has_key() is Python-2-only and long deprecated; the
        # `in` operator behaves identically on both Python 2 and Python 3.
        if prefix not in self.prefixes: self.prefixes[prefix] = []
        self.prefixes[prefix].append(uri)

    def endPrefixMapping(self, prefix):
        self.prefixes[prefix].pop()

    def startElementNS(self, name, qname, attrs):
        # Flush pending character data into the enclosing element.
        ch = self.ch; self.ch = ''
        if ch and not ch.isspace(): self.stack[-1]._dir.append(ch)

        attrs = dict(attrs)
        # Snapshot the innermost URI currently mapped for each prefix.
        newprefixes = {}
        for k in self.prefixes.keys(): newprefixes[k] = self.prefixes[k][-1]

        self.stack.append(Element(name, attrs, prefixes=newprefixes.copy()))

    def characters(self, ch):
        self.ch += ch

    def endElementNS(self, name, qname):
        # Flush trailing character data, then attach the finished element.
        ch = self.ch; self.ch = ''
        if ch and not ch.isspace(): self.stack[-1]._dir.append(ch)

        element = self.stack.pop()
        if self.stack:
            self.stack[-1]._dir.append(element)
        else:
            # Popped the root element: parsing is complete.
            self.result = element
from xml.sax import make_parser
from xml.sax.handler import feature_namespaces
def seed(fileobj):
    """Parse the XML in `fileobj` and return the root Element."""
    handler = Seeder()
    sax_parser = make_parser()
    sax_parser.setFeature(feature_namespaces, 1)
    sax_parser.setContentHandler(handler)
    sax_parser.parse(fileobj)
    return handler.result
def parse(text):
    """Parse an XML string and return the root Element."""
    from StringIO import StringIO
    buf = StringIO(text)
    return seed(buf)
def load(url):
    """Fetch `url` and parse the response body into an Element tree."""
    import urllib
    stream = urllib.urlopen(url)
    return seed(stream)
def unittest():
    """Self-test exercising parsing, serialisation and the Element API."""
    # NOTE(review): this first expression is compared but never asserted --
    # a missing `assert` means a regression here would go unnoticed.
    parse('<doc>a<baz>f<b>o</b>ob<b>a</b>r</baz>a</doc>').__repr__(1,1) == \
      '<doc>\n\ta<baz>\n\t\tf<b>o</b>ob<b>a</b>r\n\t</baz>a\n</doc>'
    assert str(parse("<doc />")) == ""
    assert str(parse("<doc>I <b>love</b> you.</doc>")) == "I love you."
    assert parse("<doc>\nmom\nwow\n</doc>")[0].strip() == "mom\nwow"
    assert str(parse('<bing> <bang> <bong>center</bong> </bang> </bing>')) == "center"
    assert str(parse('<doc>\xcf\x80</doc>')) == '\xcf\x80'
    # child access, attribute access and item/slice protocols
    d = Element('foo', attrs={'foo':'bar'}, children=['hit with a', Element('bar'), Element('bar')])
    try:
        d._doesnotexist
        # NOTE(review): string exceptions were removed in Python 2.6; this
        # raise would itself fail if ever reached.
        raise "ExpectedError", "but found success. Damn."
    except AttributeError: pass
    assert d.bar._name == 'bar'
    try:
        d.doesnotexist
        raise "ExpectedError", "but found success. Damn."
    except AttributeError: pass
    assert hasattr(d, 'bar') == True
    assert d('foo') == 'bar'
    d(silly='yes')
    assert d('silly') == 'yes'
    assert d() == d._attrs
    assert d[0] == 'hit with a'
    d[0] = 'ice cream'
    assert d[0] == 'ice cream'
    del d[0]
    assert d[0]._name == "bar"
    assert len(d[:]) == len(d._dir)
    assert len(d[1:]) == len(d._dir) - 1
    assert len(d['bar':]) == 2
    d['bar':] = 'baz'
    assert len(d['bar':]) == 3
    assert d['bar']._name == 'bar'
    # namespace handling
    d = Element('foo')
    doc = Namespace("http://example.org/bar")
    bbc = Namespace("http://example.org/bbc")
    dc = Namespace("http://purl.org/dc/elements/1.1/")
    d = parse("""<doc version="2.7182818284590451"
      xmlns="http://example.org/bar"
      xmlns:dc="http://purl.org/dc/elements/1.1/"
      xmlns:bbc="http://example.org/bbc">
        <author>John Polk and John Palfrey</author>
        <dc:creator>John Polk</dc:creator>
        <dc:creator>John Palfrey</dc:creator>
        <bbc:show bbc:station="4">Buffy</bbc:show>
    </doc>""")
    assert repr(d) == '<doc version="2.7182818284590451">...</doc>'
    assert d.__repr__(1) == '<doc xmlns:bbc="http://example.org/bbc" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns="http://example.org/bar" version="2.7182818284590451"><author>John Polk and John Palfrey</author><dc:creator>John Polk</dc:creator><dc:creator>John Palfrey</dc:creator><bbc:show bbc:station="4">Buffy</bbc:show></doc>'
    assert d.__repr__(1,1) == '<doc xmlns:bbc="http://example.org/bbc" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns="http://example.org/bar" version="2.7182818284590451">\n\t<author>John Polk and John Palfrey</author>\n\t<dc:creator>John Polk</dc:creator>\n\t<dc:creator>John Palfrey</dc:creator>\n\t<bbc:show bbc:station="4">Buffy</bbc:show>\n</doc>'
    assert repr(parse("<doc xml:lang='en' />")) == '<doc xml:lang="en"></doc>'
    assert str(d.author) == str(d['author']) == "John Polk and John Palfrey"
    assert d.author._name == doc.author
    assert str(d[dc.creator]) == "John Polk"
    assert d[dc.creator]._name == dc.creator
    assert str(d[dc.creator:][1]) == "John Palfrey"
    d[dc.creator] = "Me!!!"
    assert str(d[dc.creator]) == "Me!!!"
    assert len(d[dc.creator:]) == 1
    d[dc.creator:] = "You!!!"
    assert len(d[dc.creator:]) == 2
    assert d[bbc.show](bbc.station) == "4"
    d[bbc.show](bbc.station, "5")
    assert d[bbc.show](bbc.station) == "5"
    # quoting / CDATA behaviour
    e = Element('e')
    e.c = '<img src="foo">'
    assert e.__repr__(1) == '<e><c>&lt;img src="foo"></c></e>'
    e.c = '2 > 4'
    assert e.__repr__(1) == '<e><c>2 > 4</c></e>'
    e.c = 'CDATA sections are <em>closed</em> with ]]>.'
    assert e.__repr__(1) == '<e><c>CDATA sections are &lt;em>closed&lt;/em> with ]]&gt;.</c></e>'
    e.c = parse('<div xmlns="http://www.w3.org/1999/xhtml">i<br /><span></span>love<br />you</div>')
    assert e.__repr__(1) == '<e><c><div xmlns="http://www.w3.org/1999/xhtml">i<br /><span></span>love<br />you</div></c></e>'
    e = Element('e')
    e('c', 'that "sucks"')
    assert e.__repr__(1) == '<e c="that &quot;sucks&quot;"></e>'
    assert quote("]]>") == "]]&gt;"
    assert quote('< dkdkdsd dkd sksdksdfsd fsdfdsf]]> kfdfkg >') == '&lt; dkdkdsd dkd sksdksdfsd fsdfdsf]]&gt; kfdfkg >'
    assert parse('<x a="&lt;"></x>').__repr__(1) == '<x a="&lt;"></x>'
    assert parse('<a xmlns="http://a"><b xmlns="http://b"/></a>').__repr__(1) == '<a xmlns="http://a"><b xmlns="http://b"></b></a>'
if __name__ == '__main__': unittest()
| Python |
#!/usr/bin/python
import dbhash,anydbm
import sys, os, shelve, logging,string
from ConfigParser import *
import flickr
existingSets = None  # NOTE(review): never used in this module -- confirm before removing
user = None          # flickr user, set by deleteAllPics() via flickr.test_login()
configdict = ConfigParser()
configdict.read('uploadr.ini')
# 'remove_all_pics_first' is a 'true'/'false' string flag; deleteAllPics()
# only proceeds when it starts with 'true'.
deleteAll = configdict.defaults()['remove_all_pics_first']
def deleteAllPics( ):
    """Delete every photo in the authenticated flickr account, then exit.

    Guarded by the 'remove_all_pics_first' ini flag: does nothing unless it
    starts with 'true'.  Pages through the whole photo stream (up to three
    search attempts), deletes each photo, and finally hard-exits the process
    with os._exit(1).  Returns None when login fails.
    """
    global user
    try:
        user = flickr.test_login()
        logging.debug(user.id)
    except:
        logging.error(sys.exc_info()[0])
        return None
    # Re-check the config flag so an accidental call cannot wipe the account.
    if not deleteAll.startswith('true'):
        return
    logging.debug('deleteAll: Started Delete')
    retries = 0
    # Collect every photo first; this may take a very long time!
    while (retries < 3):
        try:
            photos = []
            logging.debug(user.id)
            np = flickr.photos_search_pages(user_id=user.id, auth=all, per_page="500")
            numPages = int(np)
            i = 1
            logging.debug("found %d num pages" % numPages)
            while ( numPages > 0):
                spage = str(i)
                photos.extend(flickr.photos_search(user_id=user.id, auth=all, per_page="500", page=spage))
                logging.debug( "added %d page to %d pic" % (i, len(photos)))
                numPages = numPages - 1
                i = i + 1
            logging.debug( "got all %d pics to delete" % len(photos))
            break
        except:
            logging.error("deleteAll: Flickr error while searching ....retrying")
            logging.error(sys.exc_info()[0])
            retries = retries + 1
    if (not photos or len(photos) == 0):
        logging.debug("deleteAll: No files in Flickr to delete" )
        return None
    logging.debug("deleteAll: found %d media files to delete" % (len(photos)))
    # BUG FIX: was `while (len(photos)>1)`, which always left the final photo
    # undeleted (pattern copied from code that intentionally keeps one photo).
    while photos:
        try:
            photos.pop().delete()
            # BUG FIX: `print "..."` is Python-2-only syntax; the call form
            # prints the same text on Python 2 and also parses on Python 3.
            print("deleting pic ")
            logging.debug("deleteAll: Removed one image... %d images to go" % (len(photos)))
        except:
            logging.error("deleteAll: Flickr error while deleting image")
            logging.error(sys.exc_info()[0])
    logging.debug("deleteAll: DONE DELETING - NOTHING ELSE TO DO - EXITING")
    os._exit(1)
| Python |
# Shared global singletons, assigned elsewhere at application start-up.
app = None   # application object (assigned externally; role not visible here -- TODO confirm)
tray = None  # tray/UI object (assigned externally; role not visible here -- TODO confirm)
fb = None    # FogBugz API client; sibling modules call fb.search()/fb.listPeople()
| Python |
import urllib
import urllib2
from BeautifulSoup import BeautifulSoup, CData
class FogBugzAPIError(Exception):
    """Base class for errors reported by the FogBugz API."""
    pass
class FogBugzLogonError(FogBugzAPIError):
    """Raised when logging on to FogBugz fails."""
    pass
class FogBugzConnectionError(FogBugzAPIError):
    """Raised when the FogBugz API endpoint cannot be reached."""
    pass
class FogBugz:
    """Minimal client for the FogBugz XML API.

    API commands are exposed dynamically as methods (see __getattr__), so
    fb.search(q=...) issues cmd=search.  Responses are BeautifulSoup trees.
    """
    def __init__(self, url, token=None):
        self.__handlerCache = {}  # command name -> request closure
        if not url.endswith('/'):
            url += '/'
        if token:
            self._token = token.encode('utf-8')
        else:
            # BUG FIX: was `self_token = None` (a stray local variable), which
            # left self._token unset and broke logon()/__makerequest() later.
            self._token = None

        self._opener = urllib2.build_opener()
        try:
            # api.xml tells us the real endpoint for this installation.
            soup = BeautifulSoup(self._opener.open(url + 'api.xml'))
        except urllib2.URLError:
            # BUG FIX: the message used self._url, which is not assigned yet on
            # this path and raised AttributeError instead of this error.
            raise FogBugzConnectionError("Library could not connect to the FogBugz API. Either this installation of FogBugz does not support the API, or the url, %s, is incorrect." % (url,))
        self._url = url + soup.response.url.string
        self.currentFilter = None

    def logon(self, username, password):
        """
        Logs the user on to FogBugz.

        Returns None for a successful login; raises FogBugzLogonError on
        failure.
        """
        if self._token:
            self.logoff()
        try:
            response = self.__makerequest('logon', email=username, password=password)
        except FogBugzAPIError as e:
            raise FogBugzLogonError(e)

        self._token = response.token.string
        if type(self._token) == CData:
            self._token = self._token.encode('utf-8')

    def logoff(self):
        """
        Logs off the current user.
        """
        self.__makerequest('logoff')
        self._token = None

    def token(self, token):
        """
        Set the token without actually logging on. More secure.
        """
        self._token = token.encode('utf-8')

    def __makerequest(self, cmd, **kwargs):
        # Every request carries the command and, when logged on, the token.
        kwargs["cmd"] = cmd
        if self._token:
            kwargs["token"] = self._token

        try:
            response = BeautifulSoup(self._opener.open(self._url+urllib.urlencode(dict([k, v.encode('utf-8') if isinstance(v,basestring) else v ] for k, v in kwargs.items())))).response
        except urllib2.URLError as e:
            raise FogBugzConnectionError(e)
        except UnicodeDecodeError as e:
            # Dump the offending arguments before re-raising for diagnosis.
            print(kwargs)
            raise

        if response.error:
            raise FogBugzAPIError('Error Code %s: %s' % (response.error['code'], response.error.string,))
        return response

    def __getattr__(self, name):
        """
        Handle all FogBugz API calls.

        >>> fb.logon('email@example.com', 'password')
        >>> response = fb.search(q="assignedto:email")
        """
        # Let's leave the private stuff to Python
        if name.startswith("__"):
            raise AttributeError("No such attribute '%s'" % name)

        # BUG FIX: dict.has_key() is Python-2-only; `in` works everywhere.
        if name not in self.__handlerCache:
            def handler(**kwargs):
                return self.__makerequest(name, **kwargs)
            self.__handlerCache[name] = handler
        return self.__handlerCache[name]
| Python |
from fogmini import global_objects
class Case(object):
    """A FogBugz case: id (ixBug) plus display name (sTitle)."""

    def __init__(self, id, name):
        self.id = id
        self.name = name

    @staticmethod
    def search(query):
        """Run a FogBugz search and wrap each <case> result in a Case."""
        response = global_objects.fb.search(q=query, cols="ixBug,sTitle")
        return [
            Case(node.find("ixbug").contents[0], node.find("stitle").contents[0])
            for node in response.findAll("case")
        ]
| Python |
from fogmini import global_objects
class Case(object):
    """A FogBugz case; id is coerced to int, title kept verbatim."""

    def __init__(self, id, title):
        self.id = int(id)
        self.title = title

    @staticmethod
    def search(q):
        """Search FogBugz and return the matching cases as Case objects."""
        found = global_objects.fb.search(q=q, cols="ixBug,sTitle")
        results = []
        for node in found.findAll("case"):
            ix = node.find("ixbug").contents[0]
            title = node.find("stitle").contents[0]
            results.append(Case(ix, title))
        return results
| Python |
from fogmini import global_objects
people = []
def get_name_for_email(email):
    """Return the full name of the person with the given email address.

    Loads (and caches) the FogBugz people list on first use.  Raises
    IndexError when no person matches -- kept as IndexError for backward
    compatibility with existing callers, though KeyError or ValueError
    would be more conventional.
    """
    load_people()
    for person in people:
        if person.email == email:
            return person.name
    # BUG FIX: `raise IndexError, "..."` is Python-2-only syntax; the call
    # form behaves identically on Python 2 and also parses on Python 3.
    raise IndexError("No person found with email address %s." % (email, ))
def load_people():
    """Fetch the FogBugz people list into the module cache (idempotent)."""
    global people
    if not people:
        # Populate once; the cache is never refreshed after the first call.
        people = [Person(entry) for entry in global_objects.fb.listPeople().findAll("person")]
class Person(object):
    """A FogBugz person parsed from a <person> XML node."""

    def __init__(self, xml):
        # sfullname / semail are the lowercased tag names BeautifulSoup yields.
        node_name = xml.find("sfullname")
        node_email = xml.find("semail")
        self.name = node_name.contents[0]
        self.email = node_email.contents[0]

    def __str__(self):
        return "%s (%s)" % (self.name, self.email)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.