repo stringlengths 7 90 | file_url stringlengths 81 315 | file_path stringlengths 4 228 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 14:38:15 2026-01-05 02:33:18 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/timeline/tests/functional.py | trac/trac/timeline/tests/functional.py | #!/usr/bin/python
from trac.tests.functional import *
class RegressionTestRev5883(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of the timeline fix in r5883
From Tim:
the issue was that event.markup was never being output anywhere, so
you actually have to render the template with a wiki modification
and see if '(diff)' shows up as the text in a link
also note that (diff) should _not_ show up for a wiki creation
"""
pagename = random_unique_camel()
self._tester.create_wiki_page(pagename)
self._tester.go_to_timeline()
tc.find(pagename)
tc.notfind(pagename + '.*diff</a>\\)')
self._tester.go_to_wiki(pagename)
tc.formvalue('modifypage', 'action', 'edit')
tc.submit()
tc.find('Editing ' + pagename)
tc.formvalue('edit', 'text', random_page())
tc.formvalue('edit', 'comment', random_sentence())
tc.submit('save')
self._tester.go_to_timeline()
tc.find(pagename + '.*diff</a>\\)')
def functionalSuite(suite=None):
if not suite:
import trac.tests.functional.testcases
suite = trac.tests.functional.testcases.functionalSuite()
suite.addTest(RegressionTestRev5883())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='functionalSuite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/timeline/tests/__init__.py | trac/trac/timeline/tests/__init__.py | from trac.timeline.tests.functional import functionalSuite
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/prefs/api.py | trac/trac/prefs/api.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import *
class IPreferencePanelProvider(Interface):
def get_preference_panels(req):
"""Return a list of available preference panels.
The items returned by this function must be tuple of the form
`(panel, label)`.
"""
def render_preference_panel(req, panel):
"""Process a request for a preference panel.
This function should return a tuple of the form `(template, data)`,
where `template` is the name of the template to use and `data` is the
data to be passed to the template.
"""
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/prefs/web_ui.py | trac/trac/prefs/web_ui.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004-2005 Daniel Lundin <daniel@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
import pkg_resources
import re
try:
from babel.core import Locale
except ImportError:
Locale = None
from genshi.builder import tag
from trac.core import *
from trac.prefs.api import IPreferencePanelProvider
from trac.util.datefmt import all_timezones, get_timezone, localtz
from trac.util.translation import _, get_available_locales
from trac.web import HTTPNotFound, IRequestHandler
from trac.web.chrome import add_notice, add_stylesheet, \
INavigationContributor, ITemplateProvider
class PreferencesModule(Component):
panel_providers = ExtensionPoint(IPreferencePanelProvider)
implements(INavigationContributor, IPreferencePanelProvider,
IRequestHandler, ITemplateProvider)
_form_fields = [
'newsid', 'name', 'email', 'tz', 'lc_time', 'dateinfo',
'language', 'accesskeys',
'ui.use_symbols', 'ui.hide_help',
]
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'prefs'
def get_navigation_items(self, req):
yield ('metanav', 'prefs',
tag.a(_('Preferences'), href=req.href.prefs()))
# IRequestHandler methods
def match_request(self, req):
match = re.match('/prefs(?:/([^/]+))?$', req.path_info)
if match:
req.args['panel_id'] = match.group(1)
return True
def process_request(self, req):
xhr = req.get_header('X-Requested-With') == 'XMLHttpRequest'
if xhr and req.method == 'POST' and 'save_prefs' in req.args:
self._do_save_xhr(req)
panel_id = req.args['panel_id']
panels = []
chosen_provider = None
for provider in self.panel_providers:
for name, label in provider.get_preference_panels(req) or []:
if name == panel_id or None:
chosen_provider = provider
panels.append((name, label))
if not chosen_provider:
self.log.warn('Unknown preference panel %r', panel_id)
raise HTTPNotFound(_('Unknown preference panel'))
template, data = chosen_provider.render_preference_panel(req, panel_id)
data.update({'active_panel': panel_id, 'panels': panels})
add_stylesheet(req, 'common/css/prefs.css')
return template, data, None
# IPreferencePanelProvider methods
def get_preference_panels(self, req):
yield (None, _('General'))
yield ('datetime', _('Date & Time'))
yield ('keybindings', _('Keyboard Shortcuts'))
yield ('userinterface', _('User Interface'))
if Locale:
yield ('language', _('Language'))
if not req.authname or req.authname == 'anonymous':
yield ('advanced', _('Advanced'))
def render_preference_panel(self, req, panel):
if req.method == 'POST':
if 'restore' in req.args:
self._do_load(req)
else:
self._do_save(req)
req.redirect(req.href.prefs(panel or None))
data = {
'settings': {'session': req.session, 'session_id': req.session.sid},
'timezones': all_timezones, 'timezone': get_timezone,
'localtz': localtz
}
if Locale:
locales = [Locale.parse(locale)
for locale in get_available_locales()]
languages = sorted((str(locale), locale.display_name)
for locale in locales)
data['locales'] = locales
data['languages'] = languages
return 'prefs_%s.html' % (panel or 'general'), data
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('trac.prefs', 'templates')]
# Internal methods
def _do_save_xhr(self, req):
for key in req.args:
if not key in ['save_prefs', 'panel_id']:
req.session[key] = req.args[key]
req.session.save()
req.send_no_content()
def _do_save(self, req):
for field in self._form_fields:
val = req.args.get(field, '').strip()
if val:
if field == 'tz' and 'tz' in req.session and \
val not in all_timezones:
del req.session['tz']
elif field == 'newsid':
req.session.change_sid(val)
elif field == 'accesskeys':
req.session[field] = '1'
else:
req.session[field] = val
elif field in req.session and (field in req.args or
field + '_cb' in req.args):
del req.session[field]
add_notice(req, _('Your preferences have been saved.'))
def _do_load(self, req):
if req.authname == 'anonymous':
oldsid = req.args.get('loadsid')
if oldsid:
req.session.get_session(oldsid)
add_notice(req, _('The session has been loaded.'))
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/prefs/__init__.py | trac/trac/prefs/__init__.py | from trac.prefs.api import *
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/prefs/tests/functional.py | trac/trac/prefs/tests/functional.py | #!/usr/bin/python
from trac.tests.functional import *
#TODO: split this into multiple smaller testcases
class TestPreferences(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Set preferences for admin user"""
prefs_url = self._tester.url + "/prefs"
tc.follow('Preferences')
tc.url(prefs_url)
tc.notfind('Your preferences have been saved.')
tc.formvalue('userprefs', 'name', ' System Administrator ')
tc.formvalue('userprefs', 'email', ' admin@example.com ')
tc.submit()
tc.find('Your preferences have been saved.')
tc.follow('Date & Time')
tc.url(prefs_url + '/datetime')
tc.formvalue('userprefs', 'tz', 'GMT -10:00')
tc.submit()
tc.find('Your preferences have been saved.')
tc.follow('General')
tc.url(prefs_url)
tc.notfind('Your preferences have been saved.')
tc.find('value="System Administrator"')
tc.find(r'value="admin@example\.com"')
tc.follow('Date & Time')
tc.url(prefs_url + '/datetime')
tc.find('GMT -10:00')
class RegressionTestRev5785(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of the fix in r5785"""
prefs_url = self._tester.url + "/prefs"
tc.follow('Preferences')
tc.url(prefs_url)
tc.follow('Logout')
tc.notfind(internal_error) # See [5785]
tc.follow('Login')
class RegressionTestTicket5765(FunctionalTwillTestCaseSetup):
def runTest(self):
"""Test for regression of http://trac.edgewall.org/ticket/5765
Unable to turn off 'Enable access keys' in Preferences
"""
self._tester.go_to_front()
tc.follow('Preferences')
tc.follow('Keyboard Shortcuts')
tc.formvalue('userprefs', 'accesskeys', True)
tc.submit()
tc.find('name="accesskeys".*checked="checked"')
tc.formvalue('userprefs', 'accesskeys', False)
tc.submit()
tc.notfind('name="accesskeys".*checked="checked"')
def functionalSuite(suite=None):
if not suite:
import trac.tests.functional.testcases
suite = trac.tests.functional.testcases.functionalSuite()
suite.addTest(TestPreferences())
suite.addTest(RegressionTestRev5785())
suite.addTest(RegressionTestTicket5765())
return suite
if __name__ == '__main__':
unittest.main(defaultTest='functionalSuite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/prefs/tests/__init__.py | trac/trac/prefs/tests/__init__.py | from trac.prefs.tests.functional import functionalSuite
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/api.py | trac/trac/mimeview/api.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2010 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006-2007 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
# Christian Boos <cboos@edgewall.org>
"""
File metadata management
------------------------
The `trac.mimeview` package centralizes the intelligence related to
file metadata, principally concerning the `type` (MIME type) of the
content and, if relevant, concerning the text encoding (charset) used
by the content.
There are primarily two approaches for getting the MIME type of a
given file, either taking advantage of existing conventions for the
file name, or examining the file content and applying various
heuristics.
The module also knows how to convert the file content from one type to
another type.
In some cases, only the `url` pointing to the file's content is
actually needed, that's why we avoid to read the file's content when
it's not needed.
The actual `content` to be converted might be a `unicode` object, but
it can also be the raw byte string (`str`) object, or simply an object
that can be `read()`.
.. note:: (for plugin developers)
The Mimeview API is quite complex and many things there are
currently a bit difficult to work with (e.g. what an actual
`content` might be, see the last paragraph of this description).
So this area is mainly in a ''work in progress'' state, which will
be improved along the lines described in :teo:`#3332`.
In particular, if you are interested in writing `IContentConverter`
and `IHTMLPreviewRenderer` components, note that those interfaces
will be merged into a new style `IContentConverter`. Feel free to
contribute remarks and suggestions for improvements to the
corresponding ticket (#3332 as well).
"""
import re
from StringIO import StringIO
from genshi import Markup, Stream
from genshi.core import TEXT, START, END, START_NS, END_NS
from genshi.builder import Fragment, tag
from genshi.input import HTMLParser
from trac.config import IntOption, ListOption, Option
from trac.core import *
from trac.resource import Resource
from trac.util import Ranges, content_disposition
from trac.util.text import exception_to_unicode, to_utf8, to_unicode
from trac.util.translation import _, tag_
__all__ = ['Context', 'Mimeview', 'RenderingContext', 'get_mimetype',
'is_binary', 'detect_unicode', 'content_to_unicode', 'ct_mimetype']
class RenderingContext(object):
"""
A rendering context specifies ''how'' the content should be rendered.
It holds together all the needed contextual information that will be
needed by individual renderer components.
To that end, a context keeps track of the Href instance (`.href`) which
should be used as a base for building URLs.
It also provides a `PermissionCache` (`.perm`) which can be used to
restrict the output so that only the authorized information is shown.
A rendering context may also be associated to some Trac resource which
will be used as the implicit reference when rendering relative links
or for retrieving relative content and can be used to retrieve related
metadata.
Rendering contexts can be nested, and a new context can be created from
an existing context using the call syntax. The previous context can be
retrieved using the `.parent` attribute.
For example, when rendering a wiki text of a wiki page, the context will
be associated to a resource identifying that wiki page.
If that wiki text contains a `[[TicketQuery]]` wiki macro, the macro will
set up nested contexts for each matching ticket that will be used for
rendering the ticket descriptions.
:since: version 0.11
"""
def __init__(self, resource, href=None, perm=None):
"""Directly create a `RenderingContext`.
:param resource: the associated resource
:type resource: `Resource`
:param href: an `Href` object suitable for creating URLs
:param perm: a `PermissionCache` object used for restricting the
generated output to "authorized" information only.
The actual `.perm` attribute of the rendering context will be bound
to the given `resource` so that fine-grained permission checks will
apply to that.
"""
self.parent = None #: The parent context, if any
self.resource = resource
self.href = href
self.perm = perm(resource) if perm and resource else perm
self._hints = None
@staticmethod
def from_request(*args, **kwargs):
""":deprecated: since 1.0, use `web_context` instead."""
from trac.web.chrome import web_context
return web_context(*args, **kwargs)
def __repr__(self):
path = []
context = self
while context:
if context.resource.realm: # skip toplevel resource
path.append(repr(context.resource))
context = context.parent
return '<%s %s>' % (type(self).__name__, ' - '.join(reversed(path)))
def child(self, resource=None, id=False, version=False, parent=False):
"""Create a nested rendering context.
`self` will be the parent for the new nested context.
:param resource: either a `Resource` object or the realm string for a
resource specification to be associated to the new
context. If `None`, the resource will be the same
as the resource of the parent context.
:param id: the identifier part of the resource specification
:param version: the version of the resource specification
:return: the new context object
:rtype: `RenderingContext`
>>> context = RenderingContext('wiki', 'WikiStart')
>>> ticket1 = Resource('ticket', 1)
>>> context.child('ticket', 1).resource == ticket1
True
>>> context.child(ticket1).resource is ticket1
True
>>> context.child(ticket1)().resource is ticket1
True
"""
if resource:
resource = Resource(resource, id=id, version=version,
parent=parent)
else:
resource = self.resource
context = RenderingContext(resource, href=self.href, perm=self.perm)
context.parent = self
# hack for context instances created by from_request()
# this is needed because various parts of the code rely on a request
# object being available, but that will hopefully improve in the
# future
if hasattr(self, 'req'):
context.req = self.req
return context
__call__ = child
def __contains__(self, resource):
"""Check whether a resource is in the rendering path.
The primary use for this check is to avoid to render the content of a
resource if we're already embedded in a context associated to that
resource.
:param resource: a `Resource` specification which will be checked for
"""
context = self
while context:
if context.resource and \
context.resource.realm == resource.realm and \
context.resource.id == resource.id:
# we don't care about version here
return True
context = context.parent
# Rendering hints
#
# A rendering hint is a key/value pairs that can influence renderers,
# wiki formatters and processors in the way they produce their output.
# The keys are strings, but the values could be anything.
#
# In nested contexts, the hints are inherited from their parent context,
# unless overriden locally.
def set_hints(self, **keyvalues):
"""Set rendering hints for this rendering context.
>>> ctx = RenderingContext('timeline')
>>> ctx.set_hints(wiki_flavor='oneliner', shorten_lines=True)
>>> t_ctx = ctx('ticket', 1)
>>> t_ctx.set_hints(wiki_flavor='html', preserve_newlines=True)
>>> (t_ctx.get_hint('wiki_flavor'), t_ctx.get_hint('shorten_lines'), \
t_ctx.get_hint('preserve_newlines'))
('html', True, True)
>>> (ctx.get_hint('wiki_flavor'), ctx.get_hint('shorten_lines'), \
ctx.get_hint('preserve_newlines'))
('oneliner', True, None)
"""
if self._hints is None:
self._hints = {}
hints = self._parent_hints()
if hints is not None:
self._hints.update(hints)
self._hints.update(keyvalues)
def get_hint(self, hint, default=None):
"""Retrieve a rendering hint from this context or an ancestor context.
>>> ctx = RenderingContext('timeline')
>>> ctx.set_hints(wiki_flavor='oneliner')
>>> t_ctx = ctx('ticket', 1)
>>> t_ctx.get_hint('wiki_flavor')
'oneliner'
>>> t_ctx.get_hint('preserve_newlines', True)
True
"""
hints = self._hints
if hints is None:
hints = self._parent_hints()
if hints is None:
return default
return hints.get(hint, default)
def has_hint(self, hint):
"""Test whether a rendering hint is defined in this context or in some
ancestor context.
>>> ctx = RenderingContext('timeline')
>>> ctx.set_hints(wiki_flavor='oneliner')
>>> t_ctx = ctx('ticket', 1)
>>> t_ctx.has_hint('wiki_flavor')
True
>>> t_ctx.has_hint('preserve_newlines')
False
"""
hints = self._hints
if hints is None:
hints = self._parent_hints()
if hints is None:
return False
return hint in hints
def _parent_hints(self):
p = self.parent
while p and p._hints is None:
p = p.parent
return p and p._hints
class Context(RenderingContext):
""":deprecated: old name kept for compatibility, use `RenderingContext`."""
# Some common MIME types and their associated keywords and/or file extensions
KNOWN_MIME_TYPES = {
'application/javascript': 'js',
'application/msword': 'doc dot',
'application/pdf': 'pdf',
'application/postscript': 'ps',
'application/rtf': 'rtf',
'application/x-sh': 'sh',
'application/x-csh': 'csh',
'application/x-troff': 'nroff roff troff',
'application/x-yaml': 'yml yaml',
'application/rss+xml': 'rss',
'application/xsl+xml': 'xsl',
'application/xslt+xml': 'xslt',
'image/x-icon': 'ico',
'image/svg+xml': 'svg',
'model/vrml': 'vrml wrl',
'text/css': 'css',
'text/html': 'html htm',
'text/plain': 'txt TXT text README INSTALL '
'AUTHORS COPYING ChangeLog RELEASE',
'text/xml': 'xml',
# see also TEXT_X_TYPES below
'text/x-csrc': 'c xs',
'text/x-chdr': 'h',
'text/x-c++src': 'cc CC cpp C c++ C++',
'text/x-c++hdr': 'hh HH hpp H',
'text/x-csharp': 'cs c# C#',
'text/x-diff': 'patch',
'text/x-eiffel': 'e',
'text/x-elisp': 'el',
'text/x-fortran': 'f',
'text/x-haskell': 'hs',
'text/x-ini': 'ini cfg',
'text/x-objc': 'm mm',
'text/x-ocaml': 'ml mli',
'text/x-makefile': 'make mk Makefile GNUMakefile',
'text/x-pascal': 'pas',
'text/x-perl': 'pl pm PL',
'text/x-php': 'php3 php4',
'text/x-python': 'py',
'text/x-pyrex': 'pyx',
'text/x-ruby': 'rb',
'text/x-scheme': 'scm',
'text/x-textile': 'txtl',
'text/x-vba': 'vb vba bas',
'text/x-verilog': 'v',
'text/x-vhdl': 'vhd',
}
for t in KNOWN_MIME_TYPES.keys():
types = KNOWN_MIME_TYPES[t].split()
if t.startswith('text/x-'):
types.append(t[len('text/x-'):])
KNOWN_MIME_TYPES[t] = types
# extend the above with simple (text/x-<something>: <something>) mappings
TEXT_X_TYPES = """
ada asm asp awk idl inf java ksh lua m4 mail psp rfc rst sql tcl tex zsh
"""
for x in TEXT_X_TYPES.split():
KNOWN_MIME_TYPES.setdefault('text/x-%s' % x, []).append(x)
# Default mapping from keywords/extensions to known MIME types:
MIME_MAP = {}
for t, exts in KNOWN_MIME_TYPES.items():
MIME_MAP[t] = t
for e in exts:
MIME_MAP[e] = t
# Simple builtin autodetection from the content using a regexp
MODE_RE = re.compile(r"""
\#!.+?env\s+(\w+) # 1. look for shebang with env
| \#!(?:[/\w.-_]+/)?(\w+) # 2. look for regular shebang
| -\*-\s*(?:mode:\s*)?([\w+-]+)\s*-\*- # 3. look for Emacs' -*- mode -*-
| vim:.*?(?:syntax|filetype|ft)=(\w+) # 4. look for VIM's syntax=<n>
""", re.VERBOSE)
def get_mimetype(filename, content=None, mime_map=MIME_MAP,
mime_map_patterns={}):
"""Guess the most probable MIME type of a file with the given name.
`filename` is either a filename (the lookup will then use the suffix)
or some arbitrary keyword.
`content` is either a `str` or an `unicode` string.
"""
# 0) mimetype from filename pattern (most specific)
for mimetype, regexp in mime_map_patterns.iteritems():
if regexp.match(filename):
return mimetype
suffix = filename.split('.')[-1]
if suffix in mime_map:
# 1) mimetype from the suffix, using the `mime_map`
return mime_map[suffix]
else:
mimetype = None
try:
import mimetypes
# 2) mimetype from the suffix, using the `mimetypes` module
mimetype = mimetypes.guess_type(filename)[0]
except Exception:
pass
if not mimetype and content:
match = re.search(MODE_RE, content[:1000] + content[-1000:])
if match:
mode = match.group(1) or match.group(2) or match.group(4) or \
match.group(3).lower()
if mode in mime_map:
# 3) mimetype from the content, using the `MODE_RE`
return mime_map[mode]
else:
if is_binary(content):
# 4) mimetype from the content, using`is_binary`
return 'application/octet-stream'
return mimetype
def ct_mimetype(content_type):
"""Return the mimetype part of a content type."""
return (content_type or '').split(';')[0].strip()
def is_binary(data):
"""Detect binary content by checking the first thousand bytes for zeroes.
Operate on either `str` or `unicode` strings.
"""
if isinstance(data, str) and detect_unicode(data):
return False
return '\0' in data[:1000]
def detect_unicode(data):
"""Detect different unicode charsets by looking for BOMs (Byte Order Mark).
Operate obviously only on `str` objects.
"""
if data.startswith('\xff\xfe'):
return 'utf-16-le'
elif data.startswith('\xfe\xff'):
return 'utf-16-be'
elif data.startswith('\xef\xbb\xbf'):
return 'utf-8'
else:
return None
def content_to_unicode(env, content, mimetype):
"""Retrieve an `unicode` object from a `content` to be previewed.
In case the raw content had an unicode BOM, we remove it.
>>> from trac.test import EnvironmentStub
>>> env = EnvironmentStub()
>>> content_to_unicode(env, u"\ufeffNo BOM! h\u00e9 !", '')
u'No BOM! h\\xe9 !'
>>> content_to_unicode(env, "\xef\xbb\xbfNo BOM! h\xc3\xa9 !", '')
u'No BOM! h\\xe9 !'
"""
mimeview = Mimeview(env)
if hasattr(content, 'read'):
content = content.read(mimeview.max_preview_size)
u = mimeview.to_unicode(content, mimetype)
if u and u[0] == u'\ufeff':
u = u[1:]
return u
class IHTMLPreviewRenderer(Interface):
"""Extension point interface for components that add HTML renderers of
specific content types to the `Mimeview` component.
.. note::
This interface will be merged with IContentConverter, as
conversion to text/html will simply be a particular content
conversion.
Note however that the IHTMLPreviewRenderer will still be
supported for a while through an adapter, whereas the
IContentConverter interface itself will be changed.
So if all you want to do is convert to HTML and don't feel like
following the API changes, you should rather implement this
interface for the time being.
"""
#: implementing classes should set this property to True if they
#: support text content where Trac should expand tabs into spaces
expand_tabs = False
#: indicate whether the output of this renderer is source code that can
#: be decorated with annotations
returns_source = False
def get_extra_mimetypes():
"""Augment the Mimeview system with new mimetypes associations.
This is an optional method. Not implementing the method or
returning nothing is fine, the component will still be asked
via `get_quality_ratio` if it supports a known mimetype. But
implementing it can be useful when the component knows about
additional mimetypes which may augment the list of already
mimetype to keywords associations.
Generate ``(mimetype, keywords)`` pairs for each additional
mimetype, with ``keywords`` being a list of keywords or
extensions that can be used as aliases for the mimetype
(typically file suffixes or Wiki processor keys).
.. versionadded:: 1.0
"""
def get_quality_ratio(mimetype):
"""Return the level of support this renderer provides for the `content`
of the specified MIME type. The return value must be a number between
0 and 9, where 0 means no support and 9 means "perfect" support.
"""
def render(context, mimetype, content, filename=None, url=None):
"""Render an XHTML preview of the raw `content` in a RenderingContext.
The `content` might be:
* a `str` object
* an `unicode` string
* any object with a `read` method, returning one of the above
It is assumed that the content will correspond to the given `mimetype`.
Besides the `content` value, the same content may eventually
be available through the `filename` or `url` parameters.
This is useful for renderers that embed objects, using <object> or
<img> instead of including the content inline.
Can return the generated XHTML text as a single string or as an
iterable that yields strings. In the latter case, the list will
be considered to correspond to lines of text in the original content.
"""
class IHTMLPreviewAnnotator(Interface):
"""Extension point interface for components that can annotate an XHTML
representation of file contents with additional information."""
def get_annotation_type():
"""Return a (type, label, description) tuple
that defines the type of annotation and provides human readable names.
The `type` element should be unique to the annotator.
The `label` element is used as column heading for the table,
while `description` is used as a display name to let the user
toggle the appearance of the annotation type.
"""
def get_annotation_data(context):
"""Return some metadata to be used by the `annotate_row` method below.
This will be called only once, before lines are processed.
If this raises an error, that annotator won't be used.
"""
def annotate_row(context, row, number, line, data):
"""Return the XHTML markup for the table cell that contains the
annotation data.
`context` is the context corresponding to the content being annotated,
`row` is the tr Element being built, `number` is the line number being
processed and `line` is the line's actual content.
`data` is whatever additional data the `get_annotation_data` method
decided to provide.
"""
class IContentConverter(Interface):
"""An extension point interface for generic MIME based content
conversion.
.. note:: This api will likely change in the future (see :teo:`#3332`)
"""
def get_supported_conversions():
"""Return an iterable of tuples in the form (key, name, extension,
in_mimetype, out_mimetype, quality) representing the MIME conversions
supported and
the quality ratio of the conversion in the range 0 to 9, where 0 means
no support and 9 means "perfect" support. eg. ('latex', 'LaTeX', 'tex',
'text/x-trac-wiki', 'text/plain', 8)"""
def convert_content(req, mimetype, content, key):
"""Convert the given content from mimetype to the output MIME type
represented by key. Returns a tuple in the form (content,
output_mime_type) or None if conversion is not possible."""
class Content(object):
"""A lazy file-like object that only reads `input` if necessary."""
def __init__(self, input, max_size):
self.input = input
self.max_size = max_size
self.content = None
def read(self, size=-1):
if size == 0:
return ''
if self.content is None:
self.content = StringIO(self.input.read(self.max_size))
return self.content.read(size)
def reset(self):
if self.content is not None:
self.content.seek(0)
class Mimeview(Component):
"""Generic HTML renderer for data, typically source code."""
required = True
renderers = ExtensionPoint(IHTMLPreviewRenderer)
annotators = ExtensionPoint(IHTMLPreviewAnnotator)
converters = ExtensionPoint(IContentConverter)
default_charset = Option('trac', 'default_charset', 'utf-8',
"""Charset to be used when in doubt.""")
tab_width = IntOption('mimeviewer', 'tab_width', 8,
"""Displayed tab width in file preview. (''since 0.9'')""")
max_preview_size = IntOption('mimeviewer', 'max_preview_size', 262144,
"""Maximum file size for HTML preview. (''since 0.9'')""")
mime_map = ListOption('mimeviewer', 'mime_map',
'text/x-dylan:dylan, text/x-idl:ice, text/x-ada:ads:adb',
doc="""List of additional MIME types and keyword mappings.
Mappings are comma-separated, and for each MIME type,
there's a colon (":") separated list of associated keywords
or file extensions. (''since 0.10'')""")
mime_map_patterns = ListOption('mimeviewer', 'mime_map_patterns',
'text/plain:README|INSTALL|COPYING.*',
doc="""List of additional MIME types associated to filename patterns.
Mappings are comma-separated, and each mapping consists of a MIME type
and a Python regexp used for matching filenames, separated by a colon
(":"). (''since 1.0'')""")
treat_as_binary = ListOption('mimeviewer', 'treat_as_binary',
'application/octet-stream, application/pdf, application/postscript, '
'application/msword,application/rtf,',
doc="""Comma-separated list of MIME types that should be treated as
binary data. (''since 0.11.5'')""")
def __init__(self):
self._mime_map = None
self._mime_map_patterns = None
# Public API
def get_supported_conversions(self, mimetype):
"""Return a list of target MIME types in same form as
`IContentConverter.get_supported_conversions()`, but with the converter
component appended. Output is ordered from best to worst quality."""
converters = []
for converter in self.converters:
conversions = converter.get_supported_conversions() or []
for k, n, e, im, om, q in conversions:
if im == mimetype and q > 0:
converters.append((k, n, e, im, om, q, converter))
converters = sorted(converters, key=lambda i: i[-2], reverse=True)
return converters
def convert_content(self, req, mimetype, content, key, filename=None,
url=None):
"""Convert the given content to the target MIME type represented by
`key`, which can be either a MIME type or a key. Returns a tuple of
(content, output_mime_type, extension)."""
if not content:
return ('', 'text/plain;charset=utf-8', '.txt')
# Ensure we have a MIME type for this content
full_mimetype = mimetype
if not full_mimetype:
if hasattr(content, 'read'):
content = content.read(self.max_preview_size)
full_mimetype = self.get_mimetype(filename, content)
if full_mimetype:
mimetype = ct_mimetype(full_mimetype) # split off charset
else:
mimetype = full_mimetype = 'text/plain' # fallback if not binary
# Choose best converter
candidates = list(self.get_supported_conversions(mimetype) or [])
candidates = [c for c in candidates if key in (c[0], c[4])]
if not candidates:
raise TracError(
_("No available MIME conversions from %(old)s to %(new)s",
old=mimetype, new=key))
# First successful conversion wins
for ck, name, ext, input_mimettype, output_mimetype, quality, \
converter in candidates:
output = converter.convert_content(req, mimetype, content, ck)
if output:
return (output[0], output[1], ext)
raise TracError(
_("No available MIME conversions from %(old)s to %(new)s",
old=mimetype, new=key))
def get_annotation_types(self):
"""Generator that returns all available annotation types."""
for annotator in self.annotators:
yield annotator.get_annotation_type()
    def render(self, context, mimetype, content, filename=None, url=None,
               annotations=None, force_source=False):
        """Render an XHTML preview of the given `content`.

        `content` is the same as an `IHTMLPreviewRenderer.render`'s
        `content` argument.

        The specified `mimetype` will be used to select the most appropriate
        `IHTMLPreviewRenderer` implementation available for this MIME type.
        If not given, the MIME type will be inferred from the filename or
        the content.

        Return a string containing the XHTML text.

        When rendering with an `IHTMLPreviewRenderer` fails, a warning is
        added to the request associated with the context (if any), unless
        the `disable_warnings` hint is set to `True`.
        """
        if not content:
            return ''
        if not isinstance(context, RenderingContext):
            raise TypeError("RenderingContext expected (since 0.11)")

        # Ensure we have a MIME type for this content
        full_mimetype = mimetype
        if not full_mimetype:
            if hasattr(content, 'read'):
                # Only sniff a bounded prefix of file-like input.
                content = content.read(self.max_preview_size)
            full_mimetype = self.get_mimetype(filename, content)
        if full_mimetype:
            mimetype = ct_mimetype(full_mimetype)    # split off charset
        else:
            mimetype = full_mimetype = 'text/plain'  # fallback if not binary

        # Determine candidate `IHTMLPreviewRenderer`s
        candidates = []
        for renderer in self.renderers:
            qr = renderer.get_quality_ratio(mimetype)
            if qr > 0:
                candidates.append((qr, renderer))
        # Python 2 cmp-style sort: order by decreasing quality ratio.
        candidates.sort(lambda x, y: cmp(y[0], x[0]))

        # Wrap file-like object so that it can be read multiple times
        if hasattr(content, 'read'):
            content = Content(content, self.max_preview_size)

        # First candidate which renders successfully wins.
        # Also, we don't want to expand tabs more than once.
        expanded_content = None
        for qr, renderer in candidates:
            if force_source and not getattr(renderer, 'returns_source',
                                            False):
                continue  # skip non-source renderers in force_source mode
            if isinstance(content, Content):
                # Rewind the shared buffer for each renderer attempt.
                content.reset()
            try:
                ann_names = ', '.join(annotations) if annotations else \
                           'no annotations'
                self.log.debug('Trying to render HTML preview using %s [%s]',
                               renderer.__class__.__name__, ann_names)

                # check if we need to perform a tab expansion
                rendered_content = content
                if getattr(renderer, 'expand_tabs', False):
                    if expanded_content is None:
                        # Expand once and reuse for subsequent candidates.
                        content = content_to_unicode(self.env, content,
                                                     full_mimetype)
                        expanded_content = content.expandtabs(self.tab_width)
                    rendered_content = expanded_content

                result = renderer.render(context, full_mimetype,
                                         rendered_content, filename, url)
                if not result:
                    continue

                if not (force_source or getattr(renderer, 'returns_source',
                                                False)):
                    # Direct rendering of content
                    if isinstance(result, basestring):
                        if not isinstance(result, unicode):
                            result = to_unicode(result)
                        return Markup(to_unicode(result))
                    elif isinstance(result, Fragment):
                        return result.generate()
                    else:
                        return result

                # Render content as source code
                if annotations:
                    # 'marks' request argument selects line ranges to
                    # highlight in the annotated source view.
                    m = context.req.args.get('marks') if context.req \
                        else None
                    return self._render_source(context, result, annotations,
                                               m and Ranges(m))
                else:
                    if isinstance(result, list):
                        result = Markup('\n').join(result)
                    return tag.div(class_='code')(tag.pre(result)).generate()

            except Exception, e:
                # A failing renderer is not fatal: log, optionally warn the
                # user, and fall through to the next candidate.
                self.log.warning('HTML preview using %s failed: %s',
                                 renderer.__class__.__name__,
                                 exception_to_unicode(e, traceback=True))
                if context.req and not context.get_hint('disable_warnings'):
                    from trac.web.chrome import add_warning
                    add_warning(context.req,
                        _("HTML preview using %(renderer)s failed (%(err)s)",
                          renderer=renderer.__class__.__name__,
                          err=exception_to_unicode(e)))
def _render_source(self, context, stream, annotations, marks=None):
from trac.web.chrome import add_warning
annotators, labels, titles = {}, {}, {}
for annotator in self.annotators:
atype, alabel, atitle = annotator.get_annotation_type()
if atype in annotations:
labels[atype] = alabel
titles[atype] = atitle
annotators[atype] = annotator
annotations = [a for a in annotations if a in annotators]
if isinstance(stream, list):
stream = HTMLParser(StringIO(u'\n'.join(stream)))
elif isinstance(stream, unicode):
text = stream
def linesplitter():
for line in text.splitlines(True):
yield TEXT, line, (None, -1, -1)
stream = linesplitter()
annotator_datas = []
for a in annotations:
annotator = annotators[a]
try:
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/txtl.py | trac/trac/mimeview/txtl.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
"""Trac support for Textile
See also: http://dealmeida.net/projects/textile/
"""
from trac.core import *
from trac.mimeview.api import IHTMLPreviewRenderer
class TextileRenderer(Component):
    """Renders plain text in Textile format as HTML."""
    implements(IHTMLPreviewRenderer)

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        """Claim Textile documents with a fairly high priority."""
        return 8 if mimetype == 'text/x-textile' else 0

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Feed the UTF-8 encoded `content` through the textile package."""
        import textile
        encoded = content.encode('utf-8')
        return textile.textile(encoded, encoding='utf-8')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/patch.py | trac/trac/mimeview/patch.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Christian Boos <cboos@edgewall.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Ludvig Strigeus
import os.path
from trac.core import *
from trac.mimeview.api import content_to_unicode, IHTMLPreviewRenderer, \
Mimeview
from trac.util.html import escape, Markup
from trac.util.text import expandtabs
from trac.util.translation import _
from trac.web.chrome import Chrome, add_script, add_stylesheet
__all__ = ['PatchRenderer']
class PatchRenderer(Component):
    """HTML renderer for patches in unified diff format.

    This uses the same layout as in the wiki diff view or the changeset view.
    """

    implements(IHTMLPreviewRenderer)

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        # Only handle unified diff / patch content.
        if mimetype in ('text/x-diff', 'text/x-patch'):
            return 8
        return 0

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Parse the diff and render it with the shared diff template."""
        req = context.req
        content = content_to_unicode(self.env, content, mimetype)
        changes = self._diff_to_hdf(content.splitlines(),
                                    Mimeview(self.env).tab_width)
        # Returning None (instead of raising) lets Mimeview fall back to
        # another renderer for unparseable input.
        if not changes or not any(c['diffs'] for c in changes):
            self.log.warning('Invalid unified diff content')
            return
        data = {'diff': {'style': 'inline'}, 'no_id': True,
                'changes': changes, 'longcol': 'File', 'shortcol': ''}

        add_script(req, 'common/js/diff.js')
        add_stylesheet(req, 'common/css/diff.css')
        return Chrome(self.env).render_template(req, 'diff_div.html',
                                                data, fragment=True)

    # Internal methods

    # FIXME: This function should probably share more code with the
    # trac.versioncontrol.diff module
    def _diff_to_hdf(self, difflines, tabwidth):
        """
        Translate a diff file into something suitable for inclusion in HDF.
        The result is [(filename, revname_old, revname_new, changes)],
        where changes has the same format as the result of
        `trac.versioncontrol.diff.hdf_diff`.

        If the diff cannot be parsed, this method returns None.
        """
        def _markup_intraline_change(fromlines, tolines):
            # Bracket the changed span of each paired line with the \0 / \1
            # sentinels, later replaced by <del>/<ins> markup.
            from trac.versioncontrol.diff import get_change_extent
            for i in xrange(len(fromlines)):
                fr, to = fromlines[i], tolines[i]
                (start, end) = get_change_extent(fr, to)
                if start != 0 or end != 0:
                    last = end+len(fr)
                    fromlines[i] = fr[:start] + '\0' + fr[start:last] + \
                                   '\1' + fr[last:]
                    last = end+len(to)
                    tolines[i] = to[:start] + '\0' + to[start:last] + \
                                 '\1' + to[last:]

        import re
        space_re = re.compile(' ( +)|^ ')
        def htmlify(match):
            # Replace runs of spaces so they survive HTML whitespace
            # collapsing.
            # NOTE(review): upstream uses '&nbsp;' entities here; verify
            # these literals were not altered during extraction.
            div, mod = divmod(len(match.group(0)), 2)
            return div * ' ' + mod * ' '

        comments = []
        changes = []
        lines = iter(difflines)
        try:
            line = lines.next()
            while True:
                oldpath = oldrev = newpath = newrev = ''
                oldinfo = newinfo = []
                binary = False

                # consume preamble, storing free lines in comments
                # (also detect the special case of git binary patches)
                if not line.startswith('--- '):
                    if not line.startswith('Index: ') and line != '=' * 67:
                        comments.append(line)
                    if line == "GIT binary patch":
                        binary = True
                        diffcmd_line = comments[0] # diff --git a/... b/,,,
                        oldpath, newpath = diffcmd_line.split()[-2:]
                        if any(c.startswith('new file') for c in comments):
                            oldpath = '/dev/null'
                        if any(c.startswith('deleted file')
                               for c in comments):
                            newpath = '/dev/null'
                        oldinfo = ['', oldpath]
                        newinfo = ['', newpath]
                        index = [c for c in comments
                                 if c.startswith('index ')]
                        if index: # index 8f****78..1e****5c
                            oldrev, newrev = \
                                index[0].split()[-1].split('..')
                            oldinfo.append(oldrev)
                            newinfo.append(newrev)
                        # Skim over the binary hunk itself.
                        line = lines.next()
                        while line:
                            comments.append(line)
                            line = lines.next()
                    else:
                        line = lines.next()
                        continue

                if not oldinfo and not newinfo:
                    # Base filename/version from '--- <file> [rev]'
                    oldinfo = line.split(None, 2)
                    if len(oldinfo) > 1:
                        oldpath = oldinfo[1]
                    if len(oldinfo) > 2:
                        oldrev = oldinfo[2]

                    # Changed filename/version from '+++ <file> [rev]'
                    line = lines.next()
                    if not line.startswith('+++ '):
                        self.log.debug('expected +++ after ---, got ' + line)
                        return None
                    newinfo = line.split(None, 2)
                    if len(newinfo) > 1:
                        newpath = newinfo[1]
                    if len(newinfo) > 2:
                        newrev = newinfo[2]

                # Derive a displayable common path and short labels for the
                # two sides of the diff.
                shortrev = ('old', 'new')
                if oldpath or newpath:
                    sep = re.compile(r'([/.~\\])')
                    commonprefix = ''.join(os.path.commonprefix(
                        [sep.split(newpath), sep.split(oldpath)]))
                    commonsuffix = ''.join(os.path.commonprefix(
                        [sep.split(newpath)[::-1],
                         sep.split(oldpath)[::-1]])[::-1])
                    if len(commonprefix) > len(commonsuffix):
                        common = commonprefix
                    elif commonsuffix:
                        common = commonsuffix.lstrip('/')
                        a = oldpath[:-len(commonsuffix)]
                        b = newpath[:-len(commonsuffix)]
                        if len(a) < 4 and len(b) < 4:
                            shortrev = (a, b)
                    elif oldpath == '/dev/null':
                        common = _("new file %(new)s",
                                   new=newpath.lstrip('b/'))
                        shortrev = ('-', '+')
                    elif newpath == '/dev/null':
                        common = _("deleted file %(deleted)s",
                                   deleted=oldpath.lstrip('a/'))
                        shortrev = ('+', '-')
                    else:
                        common = '(a) %s vs. (b) %s' % (oldpath, newpath)
                        shortrev = ('a', 'b')
                else:
                    common = ''

                groups = []
                groups_title = []
                changes.append({'change': 'edit', 'props': [],
                                'comments': '\n'.join(comments),
                                'binary': binary,
                                'diffs': groups,
                                'diffs_title': groups_title,
                                'old': {'path': common,
                                        'rev': ' '.join(oldinfo[1:]),
                                        'shortrev': shortrev[0]},
                                'new': {'path': common,
                                        'rev': ' '.join(newinfo[1:]),
                                        'shortrev': shortrev[1]}})

                comments = []
                line = lines.next()
                while line:
                    # "@@ -333,10 +329,8 @@" or "@@ -1 +1 @@ [... title ...]"
                    r = re.match(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@'
                                 '(.*)', line)
                    if not r:
                        break
                    blocks = []
                    groups.append(blocks)
                    # Missing counts default to 1 per the unified diff spec.
                    fromline, fromend, toline, toend = \
                        [int(x or 1) for x in r.groups()[:4]]
                    groups_title.append(r.group(5))
                    last_type = extra = None

                    fromend += fromline
                    toend += toline
                    line = lines.next()
                    while fromline < fromend or toline < toend or extra:

                        # First character is the command
                        command = ' '
                        if line:
                            command, line = line[0], line[1:]
                        # Make a new block?
                        if (command == ' ') != last_type:
                            last_type = command == ' '
                            kind = 'unmod' if last_type else 'mod'
                            block = {'type': kind,
                                     'base': {'offset': fromline - 1,
                                              'lines': []},
                                     'changed': {'offset': toline - 1,
                                                 'lines': []}}
                            blocks.append(block)
                        else:
                            block = blocks[-1]
                        if command == ' ':
                            sides = ['base', 'changed']
                        elif command == '+':
                            last_side = 'changed'
                            sides = [last_side]
                        elif command == '-':
                            last_side = 'base'
                            sides = [last_side]
                        elif command == '\\' and last_side:
                            # '\ No newline at end of file' marker
                            meta = block[last_side].setdefault('meta', {})
                            meta[len(block[last_side]['lines'])] = True
                            sides = [last_side]
                        elif command == '@': # ill-formed patch
                            groups_title[-1] = "%s (%s)" % (
                                groups_title[-1],
                                _("this hunk was shorter than expected"))
                            line = '@'+line
                            break
                        else:
                            self.log.debug('expected +, - or \\, got '
                                           + command)
                            return None
                        for side in sides:
                            if side == 'base':
                                fromline += 1
                            else:
                                toline += 1
                            block[side]['lines'].append(line)
                        line = lines.next()
                        extra = line and line[0] == '\\'
        except StopIteration:
            pass

        # Go through all groups/blocks and mark up intraline changes, and
        # convert to html
        for o in changes:
            for group in o['diffs']:
                for b in group:
                    base, changed = b['base'], b['changed']
                    f, t = base['lines'], changed['lines']
                    if b['type'] == 'mod':
                        if len(f) == 0:
                            b['type'] = 'add'
                        elif len(t) == 0:
                            b['type'] = 'rem'
                        elif len(f) == len(t):
                            _markup_intraline_change(f, t)
                    for i in xrange(len(f)):
                        # \0/\1 sentinels become <del>...</del> markup.
                        line = expandtabs(f[i], tabwidth, '\0\1')
                        line = escape(line, quotes=False)
                        line = '<del>'.join([space_re.sub(htmlify, seg)
                                             for seg in line.split('\0')])
                        line = line.replace('\1', '</del>')
                        f[i] = Markup(line)
                        if 'meta' in base and i in base['meta']:
                            f[i] = Markup('<em>%s</em>') % f[i]
                    for i in xrange(len(t)):
                        # \0/\1 sentinels become <ins>...</ins> markup.
                        line = expandtabs(t[i], tabwidth, '\0\1')
                        line = escape(line, quotes=False)
                        line = '<ins>'.join([space_re.sub(htmlify, seg)
                                             for seg in line.split('\0')])
                        line = line.replace('\1', '</ins>')
                        t[i] = Markup(line)
                        if 'meta' in changed and i in changed['meta']:
                            t[i] = Markup('<em>%s</em>') % t[i]
        return changes
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/rst.py | trac/trac/mimeview/rst.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Oliver Rutherfurd
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin
# Oliver Rutherfurd (initial implementation)
# Nuutti Kotivuori (role support)
#
# Trac support for reStructured Text, including a custom 'trac' directive
#
# 'trac' directive code by Oliver Rutherfurd, overhauled by cboos.
#
# Inserts `reference` nodes for TracLinks into the document tree.
__docformat__ = 'reStructuredText'
from distutils.version import StrictVersion
try:
from docutils import nodes
from docutils.core import publish_parts
from docutils.parsers import rst
from docutils import __version__
has_docutils = True
except ImportError:
has_docutils = False
from genshi.core import escape
from trac.core import *
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, content_to_unicode
from trac.util.html import Element, Fragment, Markup, find_element
from trac.util.translation import _
from trac.wiki.api import WikiSystem
from trac.wiki.formatter import WikiProcessor, Formatter, extract_link
if has_docutils and StrictVersion(__version__) < StrictVersion('0.6'):
    # Monkey-patch "raw" role handler in docutils to add a missing check
    # See docutils bug #2845002 on SourceForge
    def raw_role(role, rawtext, text, lineno, inliner, options={},
                 content=[]):
        # Honor the 'raw_enabled' setting, which pre-0.6 docutils ignored
        # for roles; emit a problematic node instead of raw output.
        if not inliner.document.settings.raw_enabled:
            msg = inliner.reporter.warning('raw (and derived) roles disabled')
            prb = inliner.problematic(rawtext, rawtext, msg)
            return [prb], [msg]
        # Otherwise delegate to the original docutils implementation.
        return _raw_role(role, rawtext, text, lineno, inliner, options,
                         content)

    from docutils.parsers.rst import roles
    raw_role.options = roles.raw_role.options
    _raw_role = roles.raw_role  # keep a reference to the original handler
    roles.raw_role = raw_role
    roles.register_canonical_role('raw', raw_role)
if has_docutils:
    # Register "trac" role handler and directive

    def trac_get_reference(env, context, rawtext, target, text):
        # Resolve `target` (a TracLink) into a docutils `reference` node,
        # or None if the link cannot be resolved at all.
        fulltext = target + ' ' + text if text else target
        link = extract_link(env, context, fulltext)
        uri = None
        missing = False
        if isinstance(link, (Element, Fragment)):
            linktext = Markup(link).striptags()
            # the following is a bit hackish, but it takes into account:
            # - an eventual trailing '?' for missing wiki pages
            # - space eventually introduced due to split_page_names option
            if linktext.rstrip('?').replace(' ', '') != target:
                text = linktext
            elt = find_element(link, 'href', 'missing')
            if elt is not None:
                uri = elt.attrib.get('href', '')
                missing = 'missing' in elt.attrib.get('class', '').split()
        else:
            # Not a recognized TracLink: assume a wiki page name.
            uri = context.href.wiki(target)
            missing = not WikiSystem(env).has_page(target)
        if uri or missing:
            reference = nodes.reference(rawtext, text or target)
            reference['refuri'] = uri
            if missing:
                reference['classes'].append('missing')
            return reference

    def trac_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
        """Inserts a `reference` node into the document for a given
        `TracLink`_, based on the content of the arguments.

        Usage::

          .. trac:: target [text]

        ``target`` may be any `TracLink`_, provided it doesn't
        embed a space character (e.g. wiki:"..." notation won't work).

        ``[text]`` is optional.  If not given, ``target`` is
        used as the reference text.

        .. _TracLink: http://trac.edgewall.org/wiki/TracLinks
        """
        if hasattr(state.inliner, 'trac'):
            env, context = state.inliner.trac
            link = arguments[0]
            if len(arguments) == 2:
                text = arguments[1]
            else:
                text = None
            reference = trac_get_reference(env, context, block_text, link,
                                           text)
            if reference:
                if isinstance(state, rst.states.SubstitutionDef):
                    return [reference]
                p = nodes.paragraph()
                p += reference
                return [p]
            # didn't find a match (invalid TracLink)
            msg = _("%(link)s is not a valid TracLink", link=arguments[0])
            # this is an user facing message, hence localized
        else:
            msg = "No trac context active while rendering"
            # this is more an internal error, not translated.
        # report a warning
        warning = state_machine.reporter.warning(
            msg, nodes.literal_block(block_text, block_text), line=lineno)
        return [warning]

    def trac_role(name, rawtext, text, lineno, inliner, options={},
                  content=[]):
        # Inline form: :trac:`target text` — same resolution as the
        # directive above.
        if hasattr(inliner, 'trac'):
            env, context = inliner.trac
            args = text.split(" ", 1)
            link = args[0]
            if len(args) == 2:
                text = args[1]
            else:
                text = None
            reference = trac_get_reference(env, context, rawtext, link,
                                           text)
            if reference:
                return [reference], []
            msg = _("%(link)s is not a valid TracLink", link=rawtext)
        else:
            msg = "No trac context active while rendering"
        return nodes.warning(None, nodes.literal_block(text, msg)), []

    # 1 required arg, 1 optional arg, spaces allowed in last arg
    trac_directive.arguments = (1, 1, 1)
    trac_directive.options = None
    trac_directive.content = None
    rst.directives.register_directive('trac', trac_directive)
    rst.roles.register_canonical_role('trac', trac_role)

    # Register "code-block" role handler and directive
    # (code derived from the leo plugin rst2)

    def code_formatter(env, context, language, text):
        # Delegate syntax highlighting to the wiki processor machinery.
        processor = WikiProcessor(Formatter(env, context), language)
        html = processor.process(text)
        raw = nodes.raw('', html, format='html')
        return raw

    def code_block_role(name, rawtext, text, lineno, inliner, options={},
                        content=[]):
        if not hasattr(inliner, 'trac'):
            return [], []
        env, context = inliner.trac
        language = options.get('language')
        if not language:
            # Fall back to "language:code" embedded in the role text.
            args = text.split(':', 1)
            language = args[0]
            if len(args) == 2:
                text = args[1]
            else:
                text = ''
        return [code_formatter(env, context, language, text)], []

    def code_block_directive(name, arguments, options, content, lineno,
                             content_offset, block_text, state,
                             state_machine):
        """
        Create a code-block directive for docutils.

        Usage: .. code-block:: language

        If the language can be syntax highlighted it will be.
        """
        if not hasattr(state.inliner, 'trac'):
            return []
        env, context = state.inliner.trac
        language = arguments[0]
        text = '\n'.join(content)
        return [code_formatter(env, context, language, text)]

    # These are documented
    # at http://docutils.sourceforge.net/spec/howto/rst-directives.html.
    code_block_directive.arguments = (
        1, # Number of required arguments.
        0, # Number of optional arguments.
        0) # True if final argument may contain whitespace.

    # A mapping from option name to conversion function.
    code_block_role.options = code_block_directive.options = {
        'language':
        rst.directives.unchanged # Return the text argument, unchanged
    }
    code_block_directive.content = 1 # True if content is allowed.

    # Register the directive with docutils.
    rst.directives.register_directive('code-block', code_block_directive)
    rst.roles.register_local_role('code-block', code_block_role)
class ReStructuredTextRenderer(Component):
    """HTML renderer for plain text in reStructuredText format."""

    implements(ISystemInfoProvider, IHTMLPreviewRenderer)

    # Set during __init__ once a usable docutils has been detected.
    can_render = False

    def __init__(self):
        if has_docutils:
            if StrictVersion(__version__) < StrictVersion('0.3.9'):
                self.log.warning('Docutils version >= %s required, '
                                 '%s found' % ('0.3.9', __version__))
            else:
                self.can_render = True

    # ISystemInfoProvider methods

    def get_system_info(self):
        """Report the detected Docutils version, if any."""
        if has_docutils:
            yield 'Docutils', __version__

    # IHTMLPreviewRenderer methods

    def get_quality_ratio(self, mimetype):
        """Claim reST documents, but only when docutils is usable."""
        if self.can_render and mimetype in ('text/x-rst',
                                            'text/prs.fallenstein.rst'):
            return 8
        return 0

    def render(self, context, mimetype, content, filename=None, rev=None):
        """Render `content` as reStructuredText and return the HTML body.

        Rendering errors are reported unobtrusively as small inline
        "system message" markers rather than docutils' default blocks.
        """
        # Minimize visual impact of errors
        from docutils.writers import html4css1

        class TracHTMLTranslator(html4css1.HTMLTranslator):
            """Specialized translator with unobtrusive error reporting"""

            def visit_system_message(self, node):
                paragraph = node.children.pop(0)
                message = escape(paragraph.astext()) if paragraph else ''
                backrefs = node['backrefs']
                if backrefs:
                    span = ('<span class="system-message">%s</span>' %
                            (''.join('<a href="#%s" title="%s">?</a>' %
                                     (backref, message)
                                     for backref in backrefs)))
                else:
                    span = ('<span class="system-message" title="%s">?'
                            '</span>' % message)
                self.body.append(span)

            def depart_system_message(self, node):
                pass

        writer = html4css1.Writer()
        writer.translator_class = TracHTMLTranslator

        # Attach the Trac environment/context so the 'trac' and
        # 'code-block' roles registered above can resolve links.
        inliner = rst.states.Inliner()
        inliner.trac = (self.env, context)
        parser = rst.Parser(inliner=inliner)
        content = content_to_unicode(self.env, content, mimetype)
        # Note: 'warning_stream' was previously listed twice in this dict
        # literal; Python silently keeps only the last duplicate key, so a
        # single entry is equivalent and less confusing.
        parts = publish_parts(content, writer=writer, parser=parser,
                              settings_overrides={'halt_level': 6,
                                                  'file_insertion_enabled': 0,
                                                  'raw_enabled': 0,
                                                  'warning_stream': False})
        return parts['html_body']
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/pygments.py | trac/trac/mimeview/pygments.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# Author: Matthew Good <matt@matt-good.net>
from datetime import datetime
import os
from pkg_resources import resource_filename
import re
from trac.core import *
from trac.config import ListOption, Option
from trac.env import ISystemInfoProvider
from trac.mimeview.api import IHTMLPreviewRenderer, Mimeview
from trac.prefs import IPreferencePanelProvider
from trac.util import get_pkginfo
from trac.util.datefmt import http_date, localtz
from trac.util.translation import _
from trac.web.api import IRequestHandler, HTTPNotFound
from trac.web.chrome import add_notice, add_stylesheet
from genshi import QName, Stream
from genshi.core import Attrs, START, END, TEXT
# Kludge to workaround the lack of absolute imports in Python version prior to
# 2.5
pygments = __import__('pygments', {}, {}, ['lexers', 'styles', 'formatters'])
get_all_lexers = pygments.lexers.get_all_lexers
get_lexer_by_name = pygments.lexers.get_lexer_by_name
HtmlFormatter = pygments.formatters.html.HtmlFormatter
get_all_styles = pygments.styles.get_all_styles
get_style_by_name = pygments.styles.get_style_by_name
__all__ = ['PygmentsRenderer']
class PygmentsRenderer(Component):
"""HTML renderer for syntax highlighting based on Pygments."""
implements(ISystemInfoProvider, IHTMLPreviewRenderer,
IPreferencePanelProvider, IRequestHandler)
default_style = Option('mimeviewer', 'pygments_default_style', 'trac',
"""The default style to use for Pygments syntax highlighting.""")
pygments_modes = ListOption('mimeviewer', 'pygments_modes',
'', doc=
"""List of additional MIME types known by Pygments.
For each, a tuple `mimetype:mode:quality` has to be
specified, where `mimetype` is the MIME type,
`mode` is the corresponding Pygments mode to be used
for the conversion and `quality` is the quality ratio
associated to this conversion. That can also be used
to override the default quality ratio used by the
Pygments render.""")
expand_tabs = True
returns_source = True
QUALITY_RATIO = 7
EXAMPLE = """<!DOCTYPE html>
<html lang="en">
<head>
<title>Hello, world!</title>
<script>
jQuery(document).ready(function($) {
$("h1").fadeIn("slow");
});
</script>
</head>
<body>
<h1>Hello, world!</h1>
</body>
</html>"""
def __init__(self):
self._types = None
# ISystemInfoProvider methods
def get_system_info(self):
version = get_pkginfo(pygments).get('version')
# if installed from source, fallback to the hardcoded version info
if not version and hasattr(pygments, '__version__'):
version = pygments.__version__
yield 'Pygments', version
# IHTMLPreviewRenderer methods
def get_extra_mimetypes(self):
for lexname, aliases, _, mimetypes in get_all_lexers():
name = aliases[0] if aliases else lexname
for mimetype in mimetypes:
yield mimetype, aliases
def get_quality_ratio(self, mimetype):
# Extend default MIME type to mode mappings with configured ones
if self._types is None:
self._init_types()
try:
return self._types[mimetype][1]
except KeyError:
return 0
def render(self, context, mimetype, content, filename=None, rev=None):
req = context.req
if self._types is None:
self._init_types()
add_stylesheet(req, '/pygments/%s.css' %
req.session.get('pygments_style', self.default_style))
try:
if len(content) > 0:
mimetype = mimetype.split(';', 1)[0]
language = self._types[mimetype][0]
return self._generate(language, content)
except (KeyError, ValueError):
raise Exception("No Pygments lexer found for mime-type '%s'."
% mimetype)
# IPreferencePanelProvider methods
def get_preference_panels(self, req):
yield ('pygments', _('Syntax Highlighting'))
def render_preference_panel(self, req, panel):
styles = list(get_all_styles())
if req.method == 'POST':
style = req.args.get('style')
if style and style in styles:
req.session['pygments_style'] = style
add_notice(req, _('Your preferences have been saved.'))
req.redirect(req.href.prefs(panel or None))
output = self._generate('html', self.EXAMPLE)
return 'prefs_pygments.html', {
'output': output,
'selection': req.session.get('pygments_style', self.default_style),
'styles': styles
}
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/pygments/(\w+)\.css', req.path_info)
if match:
req.args['style'] = match.group(1)
return True
def process_request(self, req):
style = req.args['style']
try:
style_cls = get_style_by_name(style)
except ValueError, e:
raise HTTPNotFound(e)
parts = style_cls.__module__.split('.')
filename = resource_filename('.'.join(parts[:-1]), parts[-1] + '.py')
mtime = datetime.fromtimestamp(os.path.getmtime(filename), localtz)
last_modified = http_date(mtime)
if last_modified == req.get_header('If-Modified-Since'):
req.send_response(304)
req.end_headers()
return
formatter = HtmlFormatter(style=style_cls)
content = u'\n\n'.join([
formatter.get_style_defs('div.code pre'),
formatter.get_style_defs('table.code td')
]).encode('utf-8')
req.send_response(200)
req.send_header('Content-Type', 'text/css; charset=utf-8')
req.send_header('Last-Modified', last_modified)
req.send_header('Content-Length', len(content))
req.write(content)
# Internal methods
def _init_types(self):
self._types = {}
for lexname, aliases, _, mimetypes in get_all_lexers():
name = aliases[0] if aliases else lexname
for mimetype in mimetypes:
self._types[mimetype] = (name, self.QUALITY_RATIO)
# Pygments currently doesn't know application/javascript
if 'application/javascript' not in self._types:
js_entry = self._types.get('text/javascript')
if js_entry:
self._types['application/javascript'] = js_entry
self._types.update(
Mimeview(self.env).configured_modes_mapping('pygments')
)
def _generate(self, language, content):
lexer = get_lexer_by_name(language, stripnl=False)
return GenshiHtmlFormatter().generate(lexer.get_tokens(content))
class GenshiHtmlFormatter(HtmlFormatter):
    """A Pygments formatter subclass that generates a Python stream instead
    of writing markup as strings to an output file.
    """

    def _chunk(self, tokens):
        """Groups tokens with the same CSS class in the token stream
        and yields them one by one, along with the CSS class, with the
        values chunked together."""
        last_class = None
        text = []
        for ttype, value in tokens:
            c = self._get_css_class(ttype)
            # 'n' (plain name) gets no styling at all.
            if c == 'n':
                c = ''
            if c == last_class:
                text.append(value)
                continue

            # If no value, leave the old <span> open.
            if value:
                yield last_class, u''.join(text)
                text = [value]
                last_class = c
        # Flush whatever is buffered for the final class.
        if text:
            yield last_class, u''.join(text)

    def generate(self, tokens):
        """Turn a Pygments token stream into a Genshi event `Stream`."""
        pos = (None, -1, -1)  # no source file/line/column information
        span = QName('span')
        class_ = QName('class')

        def _generate():
            for c, text in self._chunk(tokens):
                if c:
                    # Styled chunk: wrap in <span class="...">.
                    attrs = Attrs([(class_, c)])
                    yield START, (span, attrs), pos
                    yield TEXT, text, pos
                    yield END, span, pos
                else:
                    # Unstyled chunk: bare text event.
                    yield TEXT, text, pos
        return Stream(_generate())
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/__init__.py | trac/trac/mimeview/__init__.py | from trac.mimeview.api import *
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/tests/api.py | trac/trac/mimeview/tests/api.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import doctest
import unittest
from StringIO import StringIO
import sys
from trac.core import *
from trac.test import EnvironmentStub
from trac.mimeview import api
from trac.mimeview.api import get_mimetype, IContentConverter, Mimeview, \
_group_lines
from genshi import Stream, Namespace
from genshi.core import Attrs, TEXT, START, END
from genshi.input import HTMLParser
class GetMimeTypeTestCase(unittest.TestCase):
    """Tests for `trac.mimeview.api.get_mimetype`, one test per detection
    strategy it tries in turn: built-in MIME_MAP suffix lookup, stdlib
    `mimetypes` suffix lookup, content sniffing (shebang / mode lines),
    and the binary-content fallback."""
    def test_from_suffix_using_MIME_MAP(self):
        # Known names/suffixes resolve through trac's built-in MIME_MAP.
        self.assertEqual('text/plain', get_mimetype('README', None))
        self.assertEqual('text/plain', get_mimetype('README.txt', None))
    def test_from_suffix_using_mimetypes(self):
        # Suffixes unknown to MIME_MAP fall back to the stdlib `mimetypes`
        # registry, which may report either png variant by platform.
        accepted = ('image/png', 'image/x-png')
        self.assertTrue(get_mimetype('doc/trac_logo.png', None) in accepted)
    def test_from_content_using_CONTENT_RE(self):
        # Shebang lines and Emacs/Vim mode lines are sniffed from content.
        self.assertEqual('text/x-python',
                         get_mimetype('xxx', """
#!/usr/bin/python
# This is a python script
"""))
        self.assertEqual('text/x-python',
                         get_mimetype('xxx', """
#!/usr/bin/env python
# This is a python script
"""))
        self.assertEqual('text/x-ksh',
                         get_mimetype('xxx', """
#!/bin/ksh
# This is a shell script
"""))
        self.assertEqual('text/x-python',
                         get_mimetype('xxx', """
# -*- Python -*-
# This is a python script
"""))
        self.assertEqual('text/x-ruby',
                         get_mimetype('xxx', """
# -*- mode: ruby -*-
# This is a ruby script
"""))
        # The vim mode line is found even far into the content.
        self.assertEqual('text/x-python',
                         get_mimetype('xxx', ' ' * 2000 + '# vim: ft=python'))
    def test_from_content_using_is_binary(self):
        # A NUL byte marks the content as binary data.
        self.assertEqual('application/octet-stream',
                         get_mimetype('xxx', "abc\0xyz"))
class MimeviewTestCase(unittest.TestCase):
    """Tests for `Mimeview.get_supported_conversions`."""
    def setUp(self):
        # Enable only the three converter components declared inside the
        # test method below; they are addressed by qualified class name.
        self.env = EnvironmentStub(default_data=False,
                enable=['%s.%s' % (self.__module__, c)
                        for c in ['Converter0', 'Converter1', 'Converter2']])
    def tearDown(self):
        pass
    def test_get_supported_conversions(self):
        # The converters are deliberately declared out of order (0, 2, 1)
        # to prove the result is sorted by the quality ratio (last tuple
        # element: 8, 4, 2), highest first.
        class Converter0(Component):
            implements(IContentConverter)
            def get_supported_conversions(self):
                yield 'key0', 'Format 0', 'c0', 'text/x-sample', 'text/html', 8
        class Converter2(Component):
            implements(IContentConverter)
            def get_supported_conversions(self):
                yield 'key2', 'Format 2', 'c2', 'text/x-sample', 'text/html', 2
        class Converter1(Component):
            implements(IContentConverter)
            def get_supported_conversions(self):
                yield 'key1', 'Format 1', 'c1', 'text/x-sample', 'text/html', 4
        mimeview = Mimeview(self.env)
        conversions = mimeview.get_supported_conversions('text/x-sample')
        self.assertEqual(Converter0(self.env), conversions[0][-1])
        self.assertEqual(Converter1(self.env), conversions[1][-1])
        self.assertEqual(Converter2(self.env), conversions[2][-1])
class GroupLinesTestCase(unittest.TestCase):
    """Tests for `trac.mimeview.api._group_lines`, which splits a Genshi
    event stream into one sub-stream per physical source line, re-opening
    any markup element that spans a newline."""
    def test_empty_stream(self):
        # FIXME: this currently fails
        lines = list(_group_lines([]))
        self.assertEqual(len(lines), 0)
    def test_text_only_stream(self):
        input = [(TEXT, "test", (None, -1, -1))]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), 1)
        self.assertTrue(isinstance(lines[0], Stream))
        self.assertEquals(lines[0].events, input)
    def test_text_only_stream2(self):
        # A trailing newline terminates the line but adds no extra line.
        input = [(TEXT, "test\n", (None, -1, -1))]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), 1)
        self.assertTrue(isinstance(lines[0], Stream))
        self.assertEquals(lines[0].events, [(TEXT, "test", (None, -1, -1))])
    def test_simplespan(self):
        input = HTMLParser(StringIO(u"<span>test</span>"), encoding=None)
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), 1)
        self.assertTrue(isinstance(lines[0], Stream))
        for (a, b) in zip(lines[0], input):
            self.assertEqual(a, b)
    def test_empty_text_stream(self):
        """
        http://trac.edgewall.org/ticket/4336
        """
        input = [(TEXT, "", (None, -1, -1))]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), 0)
    def test_newline_stream(self):
        input = [(TEXT, "\n", (None, -1, -1))]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), 1)
    def test_newline_stream2(self):
        input = [(TEXT, "\n\n\n", (None, -1, -1))]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), 3)
    def test_empty_text_in_span(self):
        """
        http://trac.edgewall.org/ticket/4336
        """
        ns = Namespace('http://www.w3.org/1999/xhtml')
        input = [(START, (ns.span, Attrs([])), (None, -1, -1)),
                 (TEXT, "", (None, -1, -1)),
                 (END, ns.span, (None, -1, -1)),
                 ]
        lines = list(_group_lines(input))
        self.assertEqual(len(lines), 0)
    def test_newline(self):
        """
        If the text element does not end with a newline, it's not properly
        closed.
        """
        input = HTMLParser(StringIO(u'<span class="c">a\nb</span>'),
                           encoding=None)
        expected = ['<span class="c">a</span>',
                    '<span class="c">b</span>',
                    ]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), len(expected))
        for a, b in zip(lines, expected):
            self.assertEquals(a.render('html'), b)
    def test_newline2(self):
        """
        Same as test_newline above, but make sure it behaves properly wrt
        the trailing \\n, especially given it's inside an element.
        """
        input = HTMLParser(StringIO(u'<span class="c">a\nb\n</span>'),
                           encoding=None)
        expected = ['<span class="c">a</span>',
                    '<span class="c">b</span>',
                    ]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), len(expected))
        for a, b in zip(lines, expected):
            self.assertEquals(a.render('html'), b)
    def test_multinewline(self):
        """
        ditto.
        """
        input = HTMLParser(StringIO(u'<span class="c">\n\n\na</span>'),
                           encoding=None)
        expected = ['<span class="c"></span>',
                    '<span class="c"></span>',
                    '<span class="c"></span>',
                    '<span class="c">a</span>',
                    ]
        lines = list(_group_lines(input))
        self.assertEquals(len(lines), len(expected))
        for a, b in zip(lines, expected):
            self.assertEquals(a.render('html'), b)
def suite():
    """Aggregate the module doctests and all test cases of this module."""
    tests = unittest.TestSuite()
    tests.addTest(doctest.DocTestSuite(api))
    for case in (GetMimeTypeTestCase, MimeviewTestCase, GroupLinesTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/tests/patch.py | trac/trac/mimeview/tests/patch.py | # -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import unittest
from genshi.core import Stream
from genshi.input import HTMLParser, XML
from trac.mimeview.api import Mimeview, RenderingContext
from trac.mimeview.patch import PatchRenderer
from trac.test import EnvironmentStub, Mock, MockPerm
from trac.web.chrome import Chrome, web_context
from trac.web.href import Href
class PatchRendererTestCase(unittest.TestCase):
    """Tests for `PatchRenderer`, comparing rendered unified diffs against
    the reference markup stored in the patch.html fixture next to this
    module."""
    def setUp(self):
        env = EnvironmentStub(enable=[Chrome, PatchRenderer])
        req = Mock(base_path='', chrome={}, args={}, session={},
                   abs_href=Href('/'), href=Href('/'), locale='',
                   perm=MockPerm(), authname=None, tz=None)
        self.context = web_context(req)
        self.patch = Mimeview(env).renderers[0]
        # NOTE(review): the fixture file handle is never explicitly
        # closed; CPython's refcounting reclaims it, but an explicit
        # close (or `with`) would be cleaner.
        patch_html = open(os.path.join(os.path.split(__file__)[0],
                                       'patch.html'))
        self.patch_html = Stream(list(HTMLParser(patch_html, encoding='utf-8')))
    def _expected(self, expected_id):
        # Expected markup lives in <div id="..."><div>...</div></div>
        # containers inside the fixture document.
        return self.patch_html.select('//div[@id="%s"]/div' % expected_id)
    def _test(self, expected_id, result):
        # Compare `result`, re-serialized, line by line against the
        # fixture fragment identified by `expected_id`.
        expected = self._expected(expected_id).render(encoding='utf-8')
        result = XML(result.render(encoding='utf-8')).render(encoding='utf-8')
        expected, result = expected.splitlines(), result.splitlines()
        for exp, res in zip(expected, result):
            self.assertEquals(exp, res)
        self.assertEquals(len(expected), len(result))
    def test_simple(self):
        """
        Simple patch rendering
        """
        result = self.patch.render(self.context, None, """
--- README.orig 2006-10-27 14:42:04.062500000 +0200
+++ README 2006-10-27 14:42:28.125000000 +0200
@@ -1,5 +1,5 @@
----
-base
-base
-base
+be
+the base
+base modified
 .
""")
        self.assertTrue(result)
        self._test('simple', result)
    def test_no_newline_in_base(self):
        """
        Simple regression test for #4027 ("No newline at end of file")
        """
        result = self.patch.render(self.context, None, """
--- nonewline 2006-10-27 08:36:48.453125000 +0200
+++ newline 2006-10-27 08:36:57.187500000 +0200
@@ -1 +1 @@
-ONELINE
\ No newline at end of file
+ONELINE
""")
        self.assertTrue(result)
        self._test('no_newline_in_base', result)
    def test_no_newline_in_changed(self):
        """
        Another simple regression test for #4027 ("No newline at end of file")
        """
        result = self.patch.render(self.context, None, """
--- newline 2006-10-27 08:36:57.187500000 +0200
+++ nonewline 2006-10-27 08:36:48.453125000 +0200
@@ -1 +1 @@
-ONELINE
+ONELINE
\ No newline at end of file
""")
        self.assertTrue(result)
        self._test('no_newline_in_changed', result)
    def test_diff_to_hdf_expandtabs(self):
        """Regression test related to #4557"""
        # Tab stops are expanded to 8 columns before diffing.
        changes = self.patch._diff_to_hdf(
            ['--- hello.c 1',
             '+++ hello.c 2',
             '@@ -1 +1 @@',
             '-aa\tb',
             '+aaxb'], 8)
        self.assertEquals('aa<del> </del>b',
                          str(changes[0]['diffs'][0][0]['base']['lines'][0]))
        self.assertEquals('aa<ins>x</ins>b',
                          str(changes[0]['diffs'][0][0]['changed']['lines'][0]))
    def test_diff_to_hdf_leading_ws(self):
        """Regression test related to #5795"""
        # Leading whitespace changes must be marked up, not swallowed.
        changes = self.patch._diff_to_hdf(
            ['--- hello.c 1',
             '+++ hello.c 2',
             '@@ -1 +1 @@',
             '-*a',
             '+ *a'], 8)
        self.assertEquals('<del></del>*a',
                          str(changes[0]['diffs'][0][0]['base']['lines'][0]))
        self.assertEquals('<ins> </ins>*a',
                          str(changes[0]['diffs'][0][0]['changed']['lines'][0]))
def suite():
    """Return the test suite for the patch renderer."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(PatchRendererTestCase, 'test'))
    return tests
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/tests/pygments.py | trac/trac/mimeview/tests/pygments.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import unittest
from genshi.core import Stream, TEXT
from genshi.input import HTMLParser
try:
pygments = __import__('pygments', {}, {}, [])
have_pygments = True
except ImportError:
have_pygments = False
from trac.mimeview.api import Mimeview, RenderingContext
if have_pygments:
from trac.mimeview.pygments import PygmentsRenderer
from trac.test import EnvironmentStub, Mock
from trac.web.chrome import Chrome, web_context
from trac.web.href import Href
class PygmentsRendererTestCase(unittest.TestCase):
    """Tests for `PygmentsRenderer`, comparing highlighted output against
    the reference markup stored in the pygments.html fixture next to this
    module."""
    def setUp(self):
        self.env = EnvironmentStub(enable=[Chrome, PygmentsRenderer])
        self.pygments = Mimeview(self.env).renderers[0]
        self.req = Mock(base_path='', chrome={}, args={},
                        abs_href=Href('/'), href=Href('/'),
                        session={}, perm=None, authname=None, tz=None)
        self.context = web_context(self.req)
        # Parse the expected-output fixture; close the file explicitly
        # instead of leaking the handle until garbage collection.
        pygments_html = open(os.path.join(os.path.split(__file__)[0],
                                          'pygments.html'))
        try:
            self.pygments_html = Stream(list(HTMLParser(pygments_html,
                                                        encoding='utf-8')))
        finally:
            pygments_html.close()
    def _expected(self, expected_id):
        # Select both element and text children of the container <div>.
        # A stray trailing ')' had crept into the XPath string; it is
        # removed here (compare the parallel expression in patch.py).
        return self.pygments_html.select(
            '//div[@id="%s"]/*|//div[@id="%s"]/text()' %
            (expected_id, expected_id))
    def _test(self, expected_id, result):
        """Compare `result` line by line against the fixture fragment
        identified by `expected_id`."""
        expected = str(self._expected(expected_id))
        result = str(result)
        expected, result = expected.splitlines(), result.splitlines()
        for exp, res in zip(expected, result):
            self.assertEqual(exp, res)
        self.assertEqual(len(expected), len(result))
    def test_python_hello(self):
        """
        Simple Python highlighting with Pygments (direct)
        """
        result = self.pygments.render(self.context, 'text/x-python', """
def hello():
        return "Hello World!"
""")
        self.assertTrue(result)
        self._test('python_hello', result)
    def test_python_hello_mimeview(self):
        """
        Simple Python highlighting with Pygments (through Mimeview.render)
        """
        result = Mimeview(self.env).render(self.context, 'text/x-python', """
def hello():
        return "Hello World!"
""")
        self.assertTrue(result)
        self._test('python_hello_mimeview', result)
    def test_newline_content(self):
        """
        The behavior of Pygments changed post-Pygments 0.11.1, and now
        contains all four newlines. In Pygments 0.11.1 and prior, it only
        has three since stripnl defaults to True.

        See http://trac.edgewall.org/ticket/7705.
        """
        from pkg_resources import parse_version, get_distribution
        result = self.pygments.render(self.context, 'text/x-python', '\n\n\n\n')
        self.assertTrue(result)
        # Collect the rendered text events only.
        t = "".join([r[1] for r in result if r[0] is TEXT])
        if parse_version(pygments.__version__) > parse_version('0.11.1') \
           or pygments.__version__ == '0.11.1' and 'dev' in \
           get_distribution('Pygments').version:
            self.assertEqual("\n\n\n\n", t)
        else:
            self.assertEqual("\n\n\n", t)
    def test_empty_content(self):
        """
        A '\n' token is generated for an empty file, so we have to bypass
        pygments when rendering empty files.
        """
        result = self.pygments.render(self.context, 'text/x-python', '')
        self.assertEqual(None, result)
    def test_extra_mimetypes(self):
        """
        The text/x-ini mimetype is normally not known by Trac, but
        Pygments supports it.
        """
        mimeview = Mimeview(self.env)
        self.assertEqual('text/x-ini; charset=utf-8',
                         mimeview.get_mimetype('file.ini'))
        self.assertEqual('text/x-ini; charset=utf-8',
                         mimeview.get_mimetype('file.cfg'))
        self.assertEqual('text/x-ini; charset=utf-8',
                         mimeview.get_mimetype('file.text/x-ini'))
def suite():
    """Return the Pygments renderer tests, or an empty suite (with a
    console notice) when the pygments package is not installed."""
    tests = unittest.TestSuite()
    if not have_pygments:
        print('SKIP: mimeview/tests/pygments (no pygments installed)')
        return tests
    tests.addTest(unittest.makeSuite(PygmentsRendererTestCase, 'test'))
    return tests
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/mimeview/tests/__init__.py | trac/trac/mimeview/tests/__init__.py | from trac.mimeview.tests import api, patch, pygments
import unittest
def suite():
    """Combine the mimeview sub-module test suites into one."""
    combined = unittest.TestSuite()
    for module in (api, patch, pygments):
        combined.addTest(module.suite())
    return combined
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/api.py | trac/trac/web/api.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from BaseHTTPServer import BaseHTTPRequestHandler
from Cookie import CookieError, BaseCookie, SimpleCookie
import cgi
from datetime import datetime
import errno
from hashlib import md5
import new
import mimetypes
import os
import socket
from StringIO import StringIO
import sys
import urlparse
from trac.core import Interface, TracError
from trac.util import get_last_traceback, unquote
from trac.util.datefmt import http_date, localtz
from trac.util.text import empty, to_unicode
from trac.util.translation import _
from trac.web.href import Href
from trac.web.wsgi import _FileWrapper
class IAuthenticator(Interface):
    """Extension point interface for components that can provide the name
    of the remote user."""
    # Trac Interface methods are declared without `self`; they only
    # document the contract implementing components must fulfil.
    def authenticate(req):
        """Return the name of the remote user, or `None` if the identity of the
        user is unknown."""
class IRequestHandler(Interface):
    """Decide which `trac.core.Component` handles which `Request`, and how."""
    def match_request(req):
        """Return whether the handler wants to process the given request."""
    def process_request(req):
        """Process the request.

        Return a `(template_name, data, content_type)` tuple,
        where `data` is a dictionary of substitutions for the Genshi template.

        "text/html" is assumed if `content_type` is `None`.

        Note that if template processing should not occur, this method can
        simply send the response itself and not return anything.

        :Since 1.0: Clearsilver templates are no longer supported.
        """
class IRequestFilter(Interface):
    """Enable components to interfere with the processing done by the
    main handler, either before and/or after it enters in action.
    """
    def pre_process_request(req, handler):
        """Called after initial handler selection, and can be used to change
        the selected handler or redirect request.

        Always returns the request handler, even if unchanged.
        """
    def post_process_request(req, template, data, content_type):
        """Do any post-processing the request might need; typically adding
        values to the template `data` dictionary, or changing the Genshi
        template or mime type.

        `data` may be updated in place.

        Always returns a tuple of (template, data, content_type), even if
        unchanged.

        Note that `template`, `data`, `content_type` will be `None` if:
         - called when processing an error page
         - the default request handler did not return any result

        :Since 0.11: there's a `data` argument for supporting Genshi templates;
        this introduced a difference in arity which made it possible to
        distinguish between the IRequestFilter components still targeted
        at ClearSilver templates and the newer ones targeted at Genshi
        templates.

        :Since 1.0: Clearsilver templates are no longer supported.
        """
class ITemplateStreamFilter(Interface):
    """Transform the generated content by filtering the Genshi event stream
    generated by the template, prior to its serialization.
    """
    def filter_stream(req, method, filename, stream, data):
        """Return a filtered Genshi event stream, or the original unfiltered
        stream if no match.

        `req` is the current request object, `method` is the Genshi render
        method (xml, xhtml or text), `filename` is the filename of the template
        to be rendered, `stream` is the event stream and `data` is the data for
        the current template.

        See the Genshi_ documentation for more information.

        .. _Genshi: http://genshi.edgewall.org/wiki/Documentation/filters.html
        """
# Map of status code -> title-cased reason phrase, derived from the
# stdlib's BaseHTTPRequestHandler table.
HTTP_STATUS = dict((code, reason.title()) for code, (reason, description)
                   in BaseHTTPRequestHandler.responses.items())
class HTTPException(Exception):
    """Base class for exceptions representing HTTP error statuses.

    Concrete subclasses (`HTTPNotFound`, `HTTPBadRequest`, ...) are
    generated below, one per 4xx/5xx status code; each carries `code`
    and `reason` class attributes.
    """
    def __init__(self, detail, *args):
        """Create the exception.

        `detail` is either a message string (optionally a format string
        interpolated with `args`) or a `TracError`, whose message and
        title are reused as detail and reason.
        """
        if isinstance(detail, TracError):
            self.detail = detail.message
            self.reason = detail.title
        else:
            self.detail = detail
        if args:
            self.detail = self.detail % args
        Exception.__init__(self, '%s %s (%s)' % (self.code, self.reason,
                                                 self.detail))
    @classmethod
    def subclass(cls, name, code):
        """Create a new Exception class representing a HTTP status code."""
        reason = HTTP_STATUS.get(code, 'Unknown')
        # The built-in `type()` replaces the deprecated `new.classobj`
        # and behaves identically for new-style classes.
        new_class = type(name, (HTTPException,), {
            '__doc__': 'Exception for HTTP %d %s' % (code, reason)
        })
        new_class.code = code
        new_class.reason = reason
        return new_class
# Dynamically generate one HTTPException subclass per 4xx/5xx status code
# known to the stdlib (e.g. HTTPNotFound for 404) and publish each as a
# module-level attribute; their names are collected for re-export.
_HTTPException_subclass_names = []
for code in [code for code in HTTP_STATUS if code >= 400]:
    exc_name = HTTP_STATUS[code].replace(' ', '').replace('-', '')
    # 2.5 compatibility hack:
    if exc_name == 'InternalServerError':
        exc_name = 'InternalError'
    # Avoid doubled prefixes such as "HTTPHTTP..." for reason phrases
    # that already start with "HTTP".
    if exc_name.lower().startswith('http'):
        exc_name = exc_name[4:]
    exc_name = 'HTTP' + exc_name
    setattr(sys.modules[__name__], exc_name,
            HTTPException.subclass(exc_name, code))
    _HTTPException_subclass_names.append(exc_name)
# Don't leak the loop variables into the module namespace.
del code, exc_name
class _FieldStorage(cgi.FieldStorage):
    """Our own version of cgi.FieldStorage, with tweaks."""
    def read_multi(self, *args, **kwargs):
        """Parse a multipart body, falling back to reading it as a single
        part when the multipart boundary is malformed."""
        try:
            cgi.FieldStorage.read_multi(self, *args, **kwargs)
        except ValueError:
            # Most likely "Invalid boundary in multipart form",
            # possibly an upload of a .mht file? See #9880.
            self.read_single()
class _RequestArgs(dict):
"""Dictionary subclass that provides convenient access to request
parameters that may contain multiple values."""
def getfirst(self, name, default=None):
"""Return the first value for the specified parameter, or `default` if
the parameter was not provided.
"""
if name not in self:
return default
val = self[name]
if isinstance(val, list):
val = val[0]
return val
def getlist(self, name):
"""Return a list of values for the specified parameter, even if only
one value was provided.
"""
if name not in self:
return []
val = self[name]
if not isinstance(val, list):
val = [val]
return val
def parse_arg_list(query_string):
    """Parse a query string into a list of `(name, value)` tuples."""
    def _decode(text):
        # '+' means space in form-encoded data; percent-decode, then make
        # sure the result is unicode.
        text = unquote(text.replace('+', ' '))
        if isinstance(text, str):
            text = unicode(text, 'utf-8')
        return text
    args = []
    if not query_string:
        return args
    for field in query_string.split('&'):
        parts = field.split('=', 1)
        if len(parts) == 1:
            # No '=' at all: use the `empty` marker as the value.
            name, value = parts[0], empty
        else:
            name, value = parts
        args.append((_decode(name), _decode(value)))
    return args
def arg_list_to_args(arg_list):
    """Convert a list of `(name, value)` tuples into a `_RequestArgs`.

    A name seen once maps to its value directly; a repeated name maps to
    the list of all its values, in order.
    """
    args = _RequestArgs()
    for name, value in arg_list:
        if name not in args:
            args[name] = value
        elif isinstance(args[name], list):
            args[name].append(value)
        else:
            # Second occurrence: promote the scalar to a list.
            args[name] = [args[name], value]
    return args
class RequestDone(Exception):
    """Marker exception that indicates whether request processing has completed
    and a response was sent.
    """
    # Raised by the Request.send_*() / redirect() helpers below; the
    # dispatcher catches it to stop further processing once the response
    # has gone out.
class Cookie(SimpleCookie):
    """`SimpleCookie` variant that can skip malformed cookie values
    instead of letting a `CookieError` abort parsing of the whole
    Cookie header."""
    def load(self, rawdata, ignore_parse_errors=False):
        if ignore_parse_errors:
            self.bad_cookies = []
            # Temporarily swap in the lenient setter; the name-mangled
            # attribute overrides BaseCookie's private __set method.
            self._BaseCookie__set = self._loose_set
        SimpleCookie.load(self, rawdata)
        if ignore_parse_errors:
            # Restore strict behavior and drop the placeholder entries
            # recorded for unparsable cookies.
            self._BaseCookie__set = self._strict_set
            for key in self.bad_cookies:
                del self[key]
    # Reference to BaseCookie's original (strict) setter, so it can be
    # restored after a lenient load.
    _strict_set = BaseCookie._BaseCookie__set
    def _loose_set(self, key, real_value, coded_value):
        # If a key appears multiple times, the first occurrence has the
        # narrowest scope, keep that
        if key in self:
            return
        try:
            self._strict_set(key, real_value, coded_value)
        except CookieError:
            # Remember the bad key and insert a placeholder so that a
            # later duplicate of the same key is still ignored.
            self.bad_cookies.append(key)
            dict.__setitem__(self, key, None)
class Request(object):
"""Represents a HTTP request/response pair.
This class provides a convenience API over WSGI.
"""
    def __init__(self, environ, start_response):
        """Create the request wrapper.

        :param environ: The WSGI environment dict
        :param start_response: The WSGI callback for starting the response

        The attributes `arg_list`, `args`, `languages`, `incookie` and
        `_inheaders` are computed lazily, on first access, through the
        `callbacks` table (see `__getattr__`).
        """
        self.environ = environ
        self._start_response = start_response
        self._write = None
        self._status = '200 OK'
        self._response = None
        self._outheaders = []
        self._outcharset = None
        self.outcookie = Cookie()
        # Factories for lazily-evaluated attributes; `__getattr__` calls
        # them with the request and caches the result on the instance.
        self.callbacks = {
            'arg_list': Request._parse_arg_list,
            'args': lambda req: arg_list_to_args(req.arg_list),
            'languages': Request._parse_languages,
            'incookie': Request._parse_cookies,
            '_inheaders': Request._parse_headers
        }
        self.redirect_listeners = []
        # Prefer a deployment-configured base URL; otherwise rebuild one
        # from the WSGI environment.
        self.base_url = self.environ.get('trac.base_url')
        if not self.base_url:
            self.base_url = self._reconstruct_url()
        self.href = Href(self.base_path)
        self.abs_href = Href(self.base_url)
def __getattr__(self, name):
"""Performs lazy attribute lookup by delegating to the functions in the
callbacks dictionary."""
if name in self.callbacks:
value = self.callbacks[name](self)
setattr(self, name, value)
return value
raise AttributeError(name)
def __repr__(self):
path_info = self.environ.get('PATH_INFO', '')
return '<%s "%s %r">' % (self.__class__.__name__, self.method,
path_info)
# Public API
    @property
    def method(self):
        """The HTTP method of the request"""
        return self.environ['REQUEST_METHOD']
    @property
    def path_info(self):
        """Path inside the application"""
        # Decoded as UTF-8; a request with a non-UTF-8 path gets a 404.
        path_info = self.environ.get('PATH_INFO', '')
        try:
            return unicode(path_info, 'utf-8')
        except UnicodeDecodeError:
            raise HTTPNotFound(_("Invalid URL encoding (was %(path_info)r)",
                                 path_info=path_info))
    @property
    def query_string(self):
        """Query part of the request"""
        return self.environ.get('QUERY_STRING', '')
    @property
    def remote_addr(self):
        """IP address of the remote user"""
        return self.environ.get('REMOTE_ADDR')
    @property
    def remote_user(self):
        """ Name of the remote user.

        Will be `None` if the user has not logged in using HTTP authentication.
        """
        return self.environ.get('REMOTE_USER')
    @property
    def scheme(self):
        """The scheme of the request URL"""
        return self.environ['wsgi.url_scheme']
    @property
    def base_path(self):
        """The root path of the application"""
        return self.environ.get('SCRIPT_NAME', '')
    @property
    def server_name(self):
        """Name of the server"""
        return self.environ['SERVER_NAME']
    @property
    def server_port(self):
        """Port number the server is bound to"""
        # SERVER_PORT arrives as a string in the WSGI environ.
        return int(self.environ['SERVER_PORT'])
    def add_redirect_listener(self, listener):
        """Add a callable to be called prior to executing a redirect.

        The callable is passed the arguments to the `redirect()` call,
        i.e. it is invoked as ``listener(req, url, permanent)``.
        """
        self.redirect_listeners.append(listener)
def get_header(self, name):
"""Return the value of the specified HTTP header, or `None` if there's
no such header in the request.
"""
name = name.lower()
for key, value in self._inheaders:
if key == name:
return value
return None
def send_response(self, code=200):
"""Set the status code of the response."""
self._status = '%s %s' % (code, HTTP_STATUS.get(code, 'Unknown'))
    def send_header(self, name, value):
        """Send the response header with the specified name and value.

        `value` must either be an `unicode` string or can be converted to one
        (e.g. numbers, ...)
        """
        # Remember the output charset and the declared body length as
        # they pass through; `write()` relies on both.
        if name.lower() == 'content-type':
            ctpos = value.find('charset=')
            if ctpos >= 0:
                self._outcharset = value[ctpos + 8:].strip()
        elif name.lower() == 'content-length':
            self._content_length = int(value)
        self._outheaders.append((name, unicode(value).encode('utf-8')))
    def end_headers(self):
        """Must be called after all headers have been sent and before the
        actual content is written.
        """
        # Flush accumulated cookies as Set-Cookie headers, then hand the
        # headers to the WSGI layer; the returned writer backs `write()`.
        self._send_cookie_headers()
        self._write = self._start_response(self._status, self._outheaders)
    def check_modified(self, datetime, extra=''):
        """Check the request "If-None-Match" header against an entity tag.

        The entity tag is generated from the specified last modified time
        (`datetime`), optionally appending an `extra` string to
        indicate variants of the requested resource.

        That `extra` parameter can also be a list, in which case the MD5 sum
        of the list content will be used.

        If the generated tag matches the "If-None-Match" header of the request,
        this method sends a "304 Not Modified" response to the client.
        Otherwise, it adds the entity tag as an "ETag" header to the response
        so that consecutive requests can be cached.
        """
        if isinstance(extra, list):
            # Hash the variant list down to a fixed-size string.
            m = md5()
            for elt in extra:
                m.update(repr(elt))
            extra = m.hexdigest()
        # Weak validator: varies with the user, the modification time and
        # the variant string.
        etag = 'W/"%s/%s/%s"' % (self.authname, http_date(datetime), extra)
        inm = self.get_header('If-None-Match')
        if (not inm or inm != etag):
            self.send_header('ETag', etag)
        else:
            # The client already has this version; short-circuit here.
            self.send_response(304)
            self.send_header('Content-Length', 0)
            self.end_headers()
            raise RequestDone
    def redirect(self, url, permanent=False):
        """Send a redirect to the client, forwarding to the specified URL.

        The `url` may be relative or absolute, relative URLs will be translated
        appropriately.

        Ends request processing by raising `RequestDone`.
        """
        # Give registered listeners (e.g. session save hooks) a chance to
        # act before the response is committed.
        for listener in self.redirect_listeners:
            listener(self, url, permanent)
        if permanent:
            status = 301 # 'Moved Permanently'
        elif self.method == 'POST':
            status = 303 # 'See Other' -- safe to use in response to a POST
        else:
            status = 302 # 'Found' -- normal temporary redirect
        self.send_response(status)
        if not url.startswith(('http://', 'https://')):
            # Make sure the URL is absolute
            scheme, host = urlparse.urlparse(self.base_url)[:2]
            url = urlparse.urlunparse((scheme, host, url, None, None, None))
        # Workaround #10382, IE6+ bug when post and redirect with hash
        if status == 303 and '#' in url and \
                ' MSIE ' in self.environ.get('HTTP_USER_AGENT', ''):
            url = url.replace('#', '#__msie303:')
        self.send_header('Location', url)
        self.send_header('Content-Type', 'text/plain')
        self.send_header('Content-Length', 0)
        self.send_header('Pragma', 'no-cache')
        self.send_header('Cache-Control', 'no-cache')
        self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
        self.end_headers()
        raise RequestDone
    def send(self, content, content_type='text/html', status=200):
        """Send `content` as the complete response body and stop request
        processing by raising `RequestDone`.

        A utf-8 charset is declared in the Content-Type header; the body
        is omitted for HEAD requests.
        """
        self.send_response(status)
        self.send_header('Cache-Control', 'must-revalidate')
        self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
        self.send_header('Content-Type', content_type + ';charset=utf-8')
        self.send_header('Content-Length', len(content))
        self.end_headers()
        if self.method != 'HEAD':
            self.write(content)
        raise RequestDone
def send_error(self, exc_info, template='error.html',
content_type='text/html', status=500, env=None, data={}):
try:
if template.endswith('.html'):
if env:
from trac.web.chrome import Chrome
try:
data = Chrome(env).render_template(self, template,
data, 'text/html')
except Exception:
# second chance rendering, in "safe" mode
data['trac_error_rendering'] = True
data = Chrome(env).render_template(self, template,
data, 'text/html')
else:
content_type = 'text/plain'
data = '%s\n\n%s: %s' % (data.get('title'),
data.get('type'),
data.get('message'))
except Exception: # failed to render
data = get_last_traceback()
content_type = 'text/plain'
if isinstance(data, unicode):
data = data.encode('utf-8')
self.send_response(status)
self._outheaders = []
self.send_header('Cache-Control', 'must-revalidate')
self.send_header('Expires', 'Fri, 01 Jan 1999 00:00:00 GMT')
self.send_header('Content-Type', content_type + ';charset=utf-8')
self.send_header('Content-Length', len(data))
self._send_cookie_headers()
self._write = self._start_response(self._status, self._outheaders,
exc_info)
if self.method != 'HEAD':
self.write(data)
raise RequestDone
def send_no_content(self):
self.send_response(204)
self.send_header('Content-Length', 0)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
raise RequestDone
    def send_file(self, path, mimetype=None):
        """Send a local file to the browser.
        This method includes the "Last-Modified", "Content-Type" and
        "Content-Length" headers in the response, corresponding to the file
        attributes. It also checks the last modification time of the local file
        against the "If-Modified-Since" provided by the user agent, and sends a
        "304 Not Modified" response if it matches.

        :param path: path of the local file to send
        :param mimetype: content type to send; guessed from the file name
                         when omitted
        :raises HTTPNotFound: if `path` does not point to a regular file
        :raises RequestDone: always, to terminate request processing
        """
        if not os.path.isfile(path):
            raise HTTPNotFound(_("File %(path)s not found", path=path))
        stat = os.stat(path)
        mtime = datetime.fromtimestamp(stat.st_mtime, localtz)
        last_modified = http_date(mtime)
        # Conditional GET: an exact match on the formatted date means the
        # client's cached copy is still current.
        if last_modified == self.get_header('If-Modified-Since'):
            self.send_response(304)
            self.send_header('Content-Length', 0)
            self.end_headers()
            raise RequestDone
        if not mimetype:
            mimetype = mimetypes.guess_type(path)[0] or \
                       'application/octet-stream'
        self.send_response(200)
        self.send_header('Content-Type', mimetype)
        self.send_header('Content-Length', stat.st_size)
        self.send_header('Last-Modified', last_modified)
        # With X-Sendfile the front-end web server serves the file itself,
        # so no body is produced here.
        use_xsendfile = getattr(self, 'use_xsendfile', False)
        if use_xsendfile:
            self.send_header('X-Sendfile', os.path.abspath(path))
        self.end_headers()
        if not use_xsendfile and self.method != 'HEAD':
            fileobj = file(path, 'rb')
            # Prefer the server-provided wsgi.file_wrapper optimization
            # when available.
            file_wrapper = self.environ.get('wsgi.file_wrapper', _FileWrapper)
            self._response = file_wrapper(fileobj, 4096)
        raise RequestDone
def read(self, size=None):
"""Read the specified number of bytes from the request body."""
fileobj = self.environ['wsgi.input']
if size is None:
size = self.get_header('Content-Length')
if size is None:
size = -1
else:
size = int(size)
data = fileobj.read(size)
return data
    def write(self, data):
        """Write the given data to the response body.
        `data` *must* be a `str` string, encoded with the charset
        which has been specified in the ''Content-Type'' header
        or 'utf-8' otherwise.
        Note that the ''Content-Length'' header must have been specified.
        Its value either corresponds to the length of `data`, or, if there
        are multiple calls to `write`, to the cumulated length of the `data`
        arguments.
        """
        # Lazily finalize the headers on the first body write.
        if not self._write:
            self.end_headers()
        if not hasattr(self, '_content_length'):
            raise RuntimeError("No Content-Length header set")
        if isinstance(data, unicode):
            raise ValueError("Can't send unicode content")
        try:
            self._write(data)
        except (IOError, socket.error), e:
            # The client went away mid-response; 10053/10054 are the
            # Winsock equivalents of ECONNABORTED/ECONNRESET.  Treat a
            # dropped connection as normal request termination.
            if e.args[0] in (errno.EPIPE, errno.ECONNRESET, 10053, 10054):
                raise RequestDone
            raise
# Internal methods
    def _parse_arg_list(self):
        """Parse the supplied request parameters into a list of
        `(name, value)` tuples.

        Non-file fields are decoded from UTF-8 to `unicode`; uploaded
        files are kept as their raw `FieldStorage` objects.
        """
        fp = self.environ['wsgi.input']
        # Avoid letting cgi.FieldStorage consume the input stream when the
        # request does not contain form data
        ctype = self.get_header('Content-Type')
        if ctype:
            ctype, options = cgi.parse_header(ctype)
        if ctype not in ('application/x-www-form-urlencoded',
                         'multipart/form-data'):
            fp = StringIO('')

        # Python 2.6 introduced a backwards incompatible change for
        # FieldStorage where QUERY_STRING is no longer ignored for POST
        # requests. We'll keep the pre 2.6 behaviour for now...
        if self.method == 'POST':
            qs_on_post = self.environ.pop('QUERY_STRING', '')
        fs = _FieldStorage(fp, environ=self.environ, keep_blank_values=True)
        if self.method == 'POST':
            # Restore QUERY_STRING so later consumers of the environ
            # still see it.
            self.environ['QUERY_STRING'] = qs_on_post

        args = []
        for value in fs.list or ():
            name = value.name
            if not value.filename:
                value = unicode(value.value, 'utf-8')
            args.append((name, value))
        return args
def _parse_cookies(self):
cookies = Cookie()
header = self.get_header('Cookie')
if header:
cookies.load(header, ignore_parse_errors=True)
return cookies
def _parse_headers(self):
headers = [(name[5:].replace('_', '-').lower(), value)
for name, value in self.environ.items()
if name.startswith('HTTP_')]
if 'CONTENT_LENGTH' in self.environ:
headers.append(('content-length', self.environ['CONTENT_LENGTH']))
if 'CONTENT_TYPE' in self.environ:
headers.append(('content-type', self.environ['CONTENT_TYPE']))
return headers
def _parse_languages(self):
"""The list of languages preferred by the remote user, taken from the
``Accept-Language`` header.
"""
header = self.get_header('Accept-Language') or 'en-us'
langs = []
for i, lang in enumerate(header.split(',')):
code, params = cgi.parse_header(lang)
q = 1
if 'q' in params:
try:
q = float(params['q'])
except ValueError:
q = 0
langs.append((-q, i, code))
langs.sort()
return [code for q, i, code in langs]
    def _reconstruct_url(self):
        """Reconstruct the absolute base URL of the application.

        Uses the ``Host`` request header when present; otherwise the URL
        is rebuilt from the server name and port, omitting the port when
        it is the scheme's default.
        """
        host = self.get_header('Host')
        if not host:
            # Missing host header, so reconstruct the host from the
            # server name and port
            default_port = {'http': 80, 'https': 443}
            if self.server_port and self.server_port != \
                    default_port[self.scheme]:
                host = '%s:%d' % (self.server_name, self.server_port)
            else:
                host = self.server_name
        return urlparse.urlunparse((self.scheme, host, self.base_path, None,
                                    None, None))
def _send_cookie_headers(self):
for name in self.outcookie.keys():
path = self.outcookie[name].get('path')
if path:
path = path.replace(' ', '%20') \
.replace(';', '%3B') \
.replace(',', '%3C')
self.outcookie[name]['path'] = path
cookies = to_unicode(self.outcookie.output(header='')).encode('utf-8')
for cookie in cookies.splitlines():
self._outheaders.append(('Set-Cookie', cookie.strip()))
# Presumably excludes the dynamically generated HTTPException subclasses
# from the generated API documentation -- TODO confirm against the apidoc
# build configuration.
__no_apidoc__ = _HTTPException_subclass_names
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/fcgi_frontend.py | trac/trac/web/fcgi_frontend.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Matthew Good <trac@matt-good.net>
import os
import pkg_resources
import urllib
from trac import __version__ as VERSION
from trac.web.main import dispatch_request
# Honour the TRAC_USE_FLUP environment variable: unset (or '0'/'no'/'off')
# selects the bundled _fcgi server, anything else requests flup.
use_flup = os.environ.get('TRAC_USE_FLUP', False)
if use_flup in ('0', 'no', 'off'):
    use_flup = False
class FlupMiddleware(object):
    """WSGI middleware that URL-unquotes ``PATH_INFO``.

    Flup passes ``PATH_INFO`` through still URL-quoted, so the wrapped
    application would otherwise see escaped path segments.
    """

    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        raw_path = environ.get('PATH_INFO', '')
        environ['PATH_INFO'] = urllib.unquote(raw_path)
        return self.application(environ, start_response)
# Keyword arguments passed on to the selected WSGIServer implementation.
params = {}

if use_flup:
    try:
        from flup.server.fcgi import WSGIServer
        # 'maxThreads' is only understood by flup's threaded server.
        params['maxThreads'] = 15
        # flup needs the PATH_INFO unquoting wrapper (see FlupMiddleware).
        dispatch_request = FlupMiddleware(dispatch_request)
    except ImportError:
        # flup was requested but is not installed; fall back below.
        use_flup = False

if not use_flup:
    # Fall back to the bundled pure-Python FastCGI server.
    from _fcgi import WSGIServer
def run():
    """Serve FastCGI requests with the configured WSGI server."""
    server = WSGIServer(dispatch_request, **params)
    server.run()
if __name__ == '__main__':
    # Make sure the matching Trac distribution is importable before serving.
    pkg_resources.require('Trac==%s' % VERSION)
    run()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/_fcgi.py | trac/trac/web/_fcgi.py | # -*- coding: iso-8859-1 -*-
#
# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi <allan@saddi.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Allan Saddi <allan@saddi.com>
"""
fcgi - a FastCGI/WSGI gateway.
For more information about FastCGI, see <http://www.fastcgi.com/>.
For more information about the Web Server Gateway Interface, see
<http://www.python.org/peps/pep-0333.html>.
Example usage:
#!/usr/bin/env python
from myapplication import app # Assume app is your WSGI application object
from fcgi import WSGIServer
WSGIServer(app).run()
See the documentation for WSGIServer/Server for more information.
On most platforms, fcgi will fallback to regular CGI behavior if run in a
non-FastCGI context. If you want to force CGI behavior, set the environment
variable FCGI_FORCE_CGI to "Y" or "y".
"""
__author__ = 'Allan Saddi <allan@saddi.com>'
__version__ = '$Revision: 2025 $'
import sys
import os
import signal
import struct
import cStringIO as StringIO
import select
import socket
import errno
import traceback
try:
import thread
import threading
thread_available = True
except ImportError:
import dummy_thread as thread
import dummy_threading as threading
thread_available = False
# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
if not hasattr(socket, 'SHUT_WR'):
socket.SHUT_WR = 1
__all__ = ['WSGIServer']
# Constants from the spec.
FCGI_LISTENSOCK_FILENO = 0
FCGI_HEADER_LEN = 8
FCGI_VERSION_1 = 1
FCGI_BEGIN_REQUEST = 1
FCGI_ABORT_REQUEST = 2
FCGI_END_REQUEST = 3
FCGI_PARAMS = 4
FCGI_STDIN = 5
FCGI_STDOUT = 6
FCGI_STDERR = 7
FCGI_DATA = 8
FCGI_GET_VALUES = 9
FCGI_GET_VALUES_RESULT = 10
FCGI_UNKNOWN_TYPE = 11
FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
FCGI_NULL_REQUEST_ID = 0
FCGI_KEEP_CONN = 1
FCGI_RESPONDER = 1
FCGI_AUTHORIZER = 2
FCGI_FILTER = 3
FCGI_REQUEST_COMPLETE = 0
FCGI_CANT_MPX_CONN = 1
FCGI_OVERLOADED = 2
FCGI_UNKNOWN_ROLE = 3
FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
FCGI_MAX_REQS = 'FCGI_MAX_REQS'
FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
FCGI_Header = '!BBHHBx'
FCGI_BeginRequestBody = '!HB5x'
FCGI_EndRequestBody = '!LB3x'
FCGI_UnknownTypeBody = '!B7x'
FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
if __debug__:
    import time

    # Set non-zero to write debug output to a file.
    DEBUG = 0

    DEBUGLOG = '/tmp/fcgi.log'

    def _debug(level, msg):
        # Append *msg* to DEBUGLOG when DEBUG is at least *level*.
        if DEBUG < level:
            return

        try:
            f = open(DEBUGLOG, 'a')
            f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
            f.close()
        except:
            # Best-effort logging only -- never let a logging failure
            # disturb request processing.
            pass
class InputStream(object):
    """
    File-like object representing FastCGI input streams (FCGI_STDIN and
    FCGI_DATA). Supports the minimum methods required by WSGI spec.

    Data is appended by the owning Connection via `add_data()`; an empty
    `add_data()` call marks end-of-stream.
    """
    def __init__(self, conn):
        self._conn = conn

        # See Server.
        self._shrinkThreshold = conn.server.inputStreamShrinkThreshold

        self._buf = ''
        self._bufList = []
        self._pos = 0 # Current read position.
        self._avail = 0 # Number of bytes currently available.

        self._eof = False # True when server has sent EOF notification.

    def _shrinkBuffer(self):
        """Gets rid of already read data (since we can't rewind)."""
        if self._pos >= self._shrinkThreshold:
            self._buf = self._buf[self._pos:]
            self._avail -= self._pos
            self._pos = 0

            assert self._avail >= 0

    def _waitForData(self):
        """Waits for more data to become available."""
        self._conn.process_input()

    def read(self, n=-1):
        """Read up to *n* bytes (all remaining data when *n* < 0)."""
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            if n < 0 or (self._avail - self._pos) < n:
                # Not enough data available.
                if self._eof:
                    # And there's no more coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more data.
                    self._waitForData()
                    continue
            else:
                newPos = self._pos + n
                break
        # Merge buffer list, if necessary.
        if self._bufList:
            self._buf += ''.join(self._bufList)
            self._bufList = []
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readline(self, length=None):
        """Read one line, optionally capped at *length* bytes."""
        if self._pos == self._avail and self._eof:
            return ''
        while True:
            # Unfortunately, we need to merge the buffer list early.
            if self._bufList:
                self._buf += ''.join(self._bufList)
                self._bufList = []
            # Find newline.
            i = self._buf.find('\n', self._pos)
            if i < 0:
                # Not found?
                if self._eof:
                    # No more data coming.
                    newPos = self._avail
                    break
                else:
                    # Wait for more to come.
                    self._waitForData()
                    continue
            else:
                newPos = i + 1
                break
        if length is not None:
            if self._pos + length < newPos:
                newPos = self._pos + length
        r = self._buf[self._pos:newPos]
        self._pos = newPos
        self._shrinkBuffer()
        return r

    def readlines(self, sizehint=0):
        """Read lines until EOF or until *sizehint* bytes are collected."""
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def __iter__(self):
        return self

    def next(self):
        r = self.readline()
        if not r:
            raise StopIteration
        return r

    def add_data(self, data):
        """Append *data* from the Connection; empty data signals EOF."""
        if not data:
            self._eof = True
        else:
            self._bufList.append(data)
            self._avail += len(data)
class MultiplexedInputStream(InputStream):
    """
    A version of InputStream meant to be used with MultiplexedConnections.
    Assumes the MultiplexedConnection (the producer) and the Request
    (the consumer) are running in different threads.

    All public operations are serialized on a Condition so the producer's
    `add_data()` can wake a consumer blocked in `read()`/`readline()`.
    """
    def __init__(self, conn):
        super(MultiplexedInputStream, self).__init__(conn)

        # Arbitrates access to this InputStream (it's used simultaneously
        # by a Request and its owning Connection object).
        lock = threading.RLock()

        # Notifies Request thread that there is new data available.
        self._lock = threading.Condition(lock)

    def _waitForData(self):
        # Wait for notification from add_data().
        self._lock.wait()

    def read(self, n=-1):
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).read(n)
        finally:
            self._lock.release()

    def readline(self, length=None):
        self._lock.acquire()
        try:
            return super(MultiplexedInputStream, self).readline(length)
        finally:
            self._lock.release()

    def add_data(self, data):
        self._lock.acquire()
        try:
            super(MultiplexedInputStream, self).add_data(data)
            # Wake any consumer blocked in _waitForData().
            self._lock.notify()
        finally:
            self._lock.release()
class OutputStream(object):
    """
    FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
    write() or writelines() immediately result in Records being sent back
    to the server. Buffering should be done in a higher level!
    """
    def __init__(self, conn, req, type, buffered=False):
        self._conn = conn
        self._req = req
        self._type = type
        self._buffered = buffered
        self._bufList = [] # Used if buffered is True
        self.dataWritten = False
        self.closed = False

    def _write(self, data):
        # Split *data* into Records no larger than the server's maxwrite
        # limit (minus the record header).
        length = len(data)
        while length:
            toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)

            rec = Record(self._type, self._req.requestId)
            rec.contentLength = toWrite
            rec.contentData = data[:toWrite]
            self._conn.writeRecord(rec)

            data = data[toWrite:]
            length -= toWrite

    def write(self, data):
        """Send *data*, or queue it when this stream is buffered."""
        assert not self.closed

        if not data:
            return

        self.dataWritten = True

        if self._buffered:
            self._bufList.append(data)
        else:
            self._write(data)

    def writelines(self, lines):
        assert not self.closed

        for line in lines:
            self.write(line)

    def flush(self):
        # Only need to flush if this OutputStream is actually buffered.
        if self._buffered:
            data = ''.join(self._bufList)
            self._bufList = []
            self._write(data)

    # Though available, the following should NOT be called by WSGI apps.
    def close(self):
        """Sends end-of-stream notification, if necessary."""
        if not self.closed and self.dataWritten:
            self.flush()
            # A zero-length Record marks end-of-stream to the web server.
            rec = Record(self._type, self._req.requestId)
            self._conn.writeRecord(rec)
            self.closed = True
class TeeOutputStream(object):
    """Fan-out wrapper over several output file-like objects.

    Every chunk written through this object is duplicated to each of the
    underlying streams, in order.
    """
    def __init__(self, streamList):
        self._streamList = streamList

    def write(self, data):
        """Copy *data* to every underlying stream."""
        for stream in self._streamList:
            stream.write(data)

    def writelines(self, lines):
        """Write each item of *lines* to every underlying stream."""
        for chunk in lines:
            self.write(chunk)

    def flush(self):
        """Flush every underlying stream."""
        for stream in self._streamList:
            stream.flush()
class StdoutWrapper(object):
    """Wrapper for sys.stdout that records whether data has been written.

    Empty writes are dropped; any other attribute access is delegated to
    the wrapped file object.
    """
    def __init__(self, stdout):
        self._file = stdout
        self.dataWritten = False

    def write(self, data):
        # Skip empty chunks entirely so they neither reach the wrapped
        # file nor mark the stream as written-to.
        if not data:
            return
        self.dataWritten = True
        self._file.write(data)

    def writelines(self, lines):
        for chunk in lines:
            self.write(chunk)

    def __getattr__(self, name):
        # Delegate everything else (flush, close, ...) to the wrapped file.
        return getattr(self._file, name)
def decode_pair(s, pos=0):
    """
    Decodes a FastCGI name/value pair starting at *pos* in *s*.

    Each length is either a single byte (< 128) or, when the high bit of
    the first byte is set, a 4-byte big-endian integer with the high bit
    masked off.  Returns the new position together with the (name, value)
    pair.
    """
    lengths = []
    for _ in (0, 1):
        length = ord(s[pos])
        if length & 128:
            # Long form: 31-bit length spread over four bytes.
            length = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
            pos += 4
        else:
            pos += 1
        lengths.append(length)
    nameLength, valueLength = lengths

    name = s[pos:pos+nameLength]
    pos += nameLength
    value = s[pos:pos+valueLength]
    pos += valueLength

    return (pos, (name, value))
def encode_pair(name, value):
    """
    Encodes a FastCGI name/value pair and returns the encoded string.

    Lengths below 128 use the single-byte form; longer lengths use the
    4-byte form with the high bit set.
    """
    parts = []
    for length in (len(name), len(value)):
        if length < 128:
            parts.append(chr(length))
        else:
            # Long form: set the high bit to flag a 4-byte length.
            parts.append(struct.pack('!L', length | 0x80000000))
    return ''.join(parts) + name + value
class Record(object):
    """
    A FastCGI Record.
    Used for encoding/decoding records.
    """
    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
        self.version = FCGI_VERSION_1
        self.type = type
        self.requestId = requestId
        self.contentLength = 0
        self.paddingLength = 0
        self.contentData = ''

    @staticmethod
    def _recvall(sock, length):
        """
        Attempts to receive length bytes from a socket, blocking if necessary.
        (Socket may be blocking or non-blocking.)

        Returns a `(data, received_length)` tuple; the received length may
        be short if the peer closed the connection.
        """
        dataList = []
        recvLen = 0
        while length:
            try:
                data = sock.recv(length)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait until readable.
                    select.select([sock], [], [])
                    continue
                else:
                    raise
            if not data: # EOF
                break
            dataList.append(data)
            dataLen = len(data)
            recvLen += dataLen
            length -= dataLen
        return ''.join(dataList), recvLen

    def read(self, sock):
        """Read and decode a Record from a socket.

        :raises EOFError: on any short read or socket failure
        """
        try:
            header, length = self._recvall(sock, FCGI_HEADER_LEN)
        except:
            raise EOFError

        if length < FCGI_HEADER_LEN:
            raise EOFError

        self.version, self.type, self.requestId, self.contentLength, \
            self.paddingLength = struct.unpack(FCGI_Header, header)

        if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        if self.contentLength:
            try:
                self.contentData, length = self._recvall(sock,
                                                         self.contentLength)
            except:
                raise EOFError

            if length < self.contentLength:
                raise EOFError

        if self.paddingLength:
            # Padding bytes are read and discarded.
            try:
                self._recvall(sock, self.paddingLength)
            except:
                raise EOFError

    @staticmethod
    def _sendall(sock, data):
        """
        Writes data to a socket and does not return until all the data is sent.
        """
        length = len(data)
        while length:
            try:
                sent = sock.send(data)
            except socket.error, e:
                if e[0] == errno.EAGAIN:
                    # Non-blocking socket not ready; wait until writable.
                    select.select([], [sock], [])
                    continue
                else:
                    raise
            data = data[sent:]
            length -= sent

    def write(self, sock):
        """Encode and write a Record to a socket."""
        # Pad the content so each record ends on an 8-byte boundary.
        self.paddingLength = -self.contentLength & 7

        if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
                             'contentLength = %d' %
                             (sock.fileno(), self.type, self.requestId,
                              self.contentLength))

        header = struct.pack(FCGI_Header, self.version, self.type,
                             self.requestId, self.contentLength,
                             self.paddingLength)
        self._sendall(sock, header)
        if self.contentLength:
            self._sendall(sock, self.contentData)
        if self.paddingLength:
            self._sendall(sock, '\x00'*self.paddingLength)
class Request(object):
    """
    Represents a single FastCGI request.
    These objects are passed to your handler and is the main interface
    between your handler and the fcgi module. The methods should not
    be called by your handler. However, server, params, stdin, stdout,
    stderr, and data are free for your handler's use.
    """
    def __init__(self, conn, inputStreamClass):
        self._conn = conn

        self.server = conn.server
        self.params = {}
        self.stdin = inputStreamClass(conn)
        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
        # stderr is buffered so diagnostics are sent in one batch.
        self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
        self.data = inputStreamClass(conn)

    def run(self):
        """Runs the handler, flushes the streams, and ends the request."""
        try:
            protocolStatus, appStatus = self.server.handler(self)
        except:
            # Handler crashed: dump the traceback to FCGI_STDERR and, if
            # nothing was sent yet, emit the server's error page.
            traceback.print_exc(file=self.stderr)
            self.stderr.flush()
            if not self.stdout.dataWritten:
                self.server.error(self)

            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0

        if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
                             (protocolStatus, appStatus))

        try:
            self._flush()
            self._end(appStatus, protocolStatus)
        except socket.error, e:
            # A broken pipe just means the web server went away.
            if e[0] != errno.EPIPE:
                raise

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        self._conn.end_request(self, appStatus, protocolStatus)

    def _flush(self):
        self.stdout.close()
        self.stderr.close()
class CGIRequest(Request):
    """A normal CGI request disguised as a FastCGI request.

    Uses the process environment for params and the real standard
    streams for I/O, so the same handler code works in a plain CGI
    context.
    """
    def __init__(self, server):
        # These are normally filled in by Connection.
        self.requestId = 1
        self.role = FCGI_RESPONDER
        self.flags = 0
        self.aborted = False

        self.server = server
        self.params = dict(os.environ)
        self.stdin = sys.stdin
        self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
        self.stderr = sys.stderr
        self.data = StringIO.StringIO()

    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
        # In CGI mode the request ends by terminating the process.
        sys.exit(appStatus)

    def _flush(self):
        # Not buffered, do nothing.
        pass
class Connection(object):
    """
    A Connection with the web server.
    Each Connection is associated with a single socket (which is
    connected to the web server) and is responsible for handling all
    the FastCGI message processing for that socket.
    """
    _multiplexed = False
    _inputStreamClass = InputStream

    def __init__(self, sock, addr, server):
        self._sock = sock
        self._addr = addr
        self.server = server

        # Active Requests for this Connection, mapped by request ID.
        self._requests = {}

    def _cleanupSocket(self):
        """Close the Connection's socket."""
        try:
            self._sock.shutdown(socket.SHUT_WR)
        except:
            return
        # Drain anything the peer still sends before closing for good.
        try:
            while True:
                r, w, e = select.select([self._sock], [], [])
                if not r or not self._sock.recv(1024):
                    break
        except:
            pass
        self._sock.close()

    def run(self):
        """Begin processing data from the socket."""
        self._keepGoing = True
        while self._keepGoing:
            try:
                self.process_input()
            except EOFError:
                break
            except (select.error, socket.error), e:
                if e[0] == errno.EBADF: # Socket was closed by Request.
                    break
                raise

        self._cleanupSocket()

    def process_input(self):
        """Attempt to read a single Record from the socket and process it."""
        # Currently, any children Request threads notify this Connection
        # that it is no longer needed by closing the Connection's socket.
        # We need to put a timeout on select, otherwise we might get
        # stuck in it indefinitely... (I don't like this solution.)
        while self._keepGoing:
            try:
                r, w, e = select.select([self._sock], [], [], 1.0)
            except ValueError:
                # Sigh. ValueError gets thrown sometimes when passing select
                # a closed socket.
                raise EOFError
            if r: break
        if not self._keepGoing:
            return
        rec = Record()
        rec.read(self._sock)

        # Dispatch on the record type.
        if rec.type == FCGI_GET_VALUES:
            self._do_get_values(rec)
        elif rec.type == FCGI_BEGIN_REQUEST:
            self._do_begin_request(rec)
        elif rec.type == FCGI_ABORT_REQUEST:
            self._do_abort_request(rec)
        elif rec.type == FCGI_PARAMS:
            self._do_params(rec)
        elif rec.type == FCGI_STDIN:
            self._do_stdin(rec)
        elif rec.type == FCGI_DATA:
            self._do_data(rec)
        elif rec.requestId == FCGI_NULL_REQUEST_ID:
            self._do_unknown_type(rec)
        else:
            # Need to complain about this.
            pass

    def writeRecord(self, rec):
        """
        Write a Record to the socket.
        """
        rec.write(self._sock)

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        """
        End a Request.
        Called by Request objects. An FCGI_END_REQUEST Record is
        sent to the web server. If the web server no longer requires
        the connection, the socket is closed, thereby ending this
        Connection (run() returns).
        """
        rec = Record(FCGI_END_REQUEST, req.requestId)
        rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
                                      protocolStatus)
        rec.contentLength = FCGI_EndRequestBody_LEN
        self.writeRecord(rec)

        if remove:
            del self._requests[req.requestId]

        if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)

        # Without FCGI_KEEP_CONN the web server expects us to close once
        # the last request on this connection has finished.
        if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
            self._cleanupSocket()
            self._keepGoing = False

    def _do_get_values(self, inrec):
        """Handle an FCGI_GET_VALUES request from the web server."""
        outrec = Record(FCGI_GET_VALUES_RESULT)

        pos = 0
        while pos < inrec.contentLength:
            pos, (name, value) = decode_pair(inrec.contentData, pos)
            cap = self.server.capability.get(name)
            if cap is not None:
                outrec.contentData += encode_pair(name, str(cap))

        outrec.contentLength = len(outrec.contentData)
        self.writeRecord(outrec)

    def _do_begin_request(self, inrec):
        """Handle an FCGI_BEGIN_REQUEST from the web server."""
        role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)

        req = self.server.request_class(self, self._inputStreamClass)
        req.requestId, req.role, req.flags = inrec.requestId, role, flags
        req.aborted = False

        if not self._multiplexed and self._requests:
            # Can't multiplex requests.
            self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
        else:
            self._requests[inrec.requestId] = req

    def _do_abort_request(self, inrec):
        """
        Handle an FCGI_ABORT_REQUEST from the web server.
        We just mark a flag in the associated Request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.aborted = True

    def _start_request(self, req):
        """Run the request."""
        # Not multiplexed, so run it inline.
        req.run()

    def _do_params(self, inrec):
        """
        Handle an FCGI_PARAMS Record.
        If the last FCGI_PARAMS Record is received, start the request.
        """
        req = self._requests.get(inrec.requestId)
        if req is not None:
            if inrec.contentLength:
                pos = 0
                while pos < inrec.contentLength:
                    pos, (name, value) = decode_pair(inrec.contentData, pos)
                    req.params[name] = value
            else:
                # An empty FCGI_PARAMS record marks the end of the
                # parameter stream, so the request is ready to run.
                self._start_request(req)

    def _do_stdin(self, inrec):
        """Handle the FCGI_STDIN stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.stdin.add_data(inrec.contentData)

    def _do_data(self, inrec):
        """Handle the FCGI_DATA stream."""
        req = self._requests.get(inrec.requestId)
        if req is not None:
            req.data.add_data(inrec.contentData)

    def _do_unknown_type(self, inrec):
        """Handle an unknown request type. Respond accordingly."""
        outrec = Record(FCGI_UNKNOWN_TYPE)
        outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
        outrec.contentLength = FCGI_UnknownTypeBody_LEN
        self.writeRecord(outrec)
class MultiplexedConnection(Connection):
    """
    A version of Connection capable of handling multiple requests
    simultaneously.

    Requests run in their own threads (see `_start_request`), so all
    operations that touch `self._requests` or write to the shared socket
    are serialized on a Condition.
    """
    _multiplexed = True
    _inputStreamClass = MultiplexedInputStream

    def __init__(self, sock, addr, server):
        super(MultiplexedConnection, self).__init__(sock, addr, server)

        # Used to arbitrate access to self._requests.
        lock = threading.RLock()

        # Notification is posted everytime a request completes, allowing us
        # to quit cleanly.
        self._lock = threading.Condition(lock)

    def _cleanupSocket(self):
        # Wait for any outstanding requests before closing the socket.
        self._lock.acquire()
        while self._requests:
            self._lock.wait()
        self._lock.release()

        super(MultiplexedConnection, self)._cleanupSocket()

    def writeRecord(self, rec):
        # Must use locking to prevent intermingling of Records from different
        # threads.
        self._lock.acquire()
        try:
            # Probably faster than calling super. ;)
            rec.write(self._sock)
        finally:
            self._lock.release()

    def end_request(self, req, appStatus=0L,
                    protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self).end_request(req, appStatus,
                                                           protocolStatus,
                                                           remove)
            # Wake _cleanupSocket() if it is waiting for requests to drain.
            self._lock.notify()
        finally:
            self._lock.release()

    def _do_begin_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_begin_request(inrec)
        finally:
            self._lock.release()

    def _do_abort_request(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_abort_request(inrec)
        finally:
            self._lock.release()

    def _start_request(self, req):
        # Each request gets its own thread instead of running inline.
        thread.start_new_thread(req.run, ())

    def _do_params(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_params(inrec)
        finally:
            self._lock.release()

    def _do_stdin(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_stdin(inrec)
        finally:
            self._lock.release()

    def _do_data(self, inrec):
        self._lock.acquire()
        try:
            super(MultiplexedConnection, self)._do_data(inrec)
        finally:
            self._lock.release()
class Server(object):
"""
The FastCGI server.
Waits for connections from the web server, processing each
request.
If run in a normal CGI context, it will instead instantiate a
CGIRequest and run the handler through there.
"""
request_class = Request
cgirequest_class = CGIRequest
# Limits the size of the InputStream's string buffer to this size + the
# server's maximum Record size. Since the InputStream is not seekable,
# we throw away already-read data once this certain amount has been read.
inputStreamShrinkThreshold = 102400 - 8192
def __init__(self, handler=None, maxwrite=8192, bindAddress=None,
umask=None, multiplexed=False):
"""
handler, if present, must reference a function or method that
takes one argument: a Request object. If handler is not
specified at creation time, Server *must* be subclassed.
(The handler method below is abstract.)
maxwrite is the maximum number of bytes (per Record) to write
to the server. I've noticed mod_fastcgi has a relatively small
receive buffer (8K or so).
bindAddress, if present, must either be a string or a 2-tuple. If
present, run() will open its own listening socket. You would use
this if you wanted to run your application as an 'external' FastCGI
app. (i.e. the webserver would no longer be responsible for starting
your app) If a string, it will be interpreted as a filename and a UNIX
socket will be opened. If a tuple, the first element, a string,
is the interface name/IP to bind to, and the second element (an int)
is the port number.
Set multiplexed to True if you want to handle multiple requests
per connection. Some FastCGI backends (namely mod_fastcgi) don't
multiplex requests at all, so by default this is off (which saves
on thread creation/locking overhead). If threads aren't available,
this keyword is ignored; it's not possible to multiplex requests
at all.
"""
if handler is not None:
self.handler = handler
self.maxwrite = maxwrite
if thread_available:
try:
import resource
# Attempt to glean the maximum number of connections
# from the OS.
maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
except (ImportError, AttributeError):
maxConns = 100 # Just some made up number.
maxReqs = maxConns
if multiplexed:
self._connectionClass = MultiplexedConnection
maxReqs *= 5 # Another made up number.
else:
self._connectionClass = Connection
self.capability = {
FCGI_MAX_CONNS: maxConns,
FCGI_MAX_REQS: maxReqs,
FCGI_MPXS_CONNS: multiplexed and 1 or 0
}
else:
self._connectionClass = Connection
self.capability = {
# If threads aren't available, these are pretty much correct.
FCGI_MAX_CONNS: 1,
FCGI_MAX_REQS: 1,
FCGI_MPXS_CONNS: 0
}
self._bindAddress = bindAddress
self._umask = umask
def _setupSocket(self):
if self._bindAddress is None: # Run as a normal FastCGI?
isFCGI = True
sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
socket.SOCK_STREAM)
try:
sock.getpeername()
except socket.error, e:
if e[0] == errno.ENOTSOCK:
# Not a socket, assume CGI context.
isFCGI = False
elif e[0] != errno.ENOTCONN:
raise
# FastCGI/CGI discrimination is broken on Mac OS X.
# Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
# if you want to run your app as a simple CGI. (You can do
# this with Apache's mod_env [not loaded by default in OS X
# client, ha ha] and the SetEnv directive.)
if not isFCGI or \
os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
req = self.cgirequest_class(self)
req.run()
sys.exit(0)
else:
# Run as a server
oldUmask = None
if type(self._bindAddress) is str:
# Unix socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.unlink(self._bindAddress)
except OSError:
pass
if self._umask is not None:
oldUmask = os.umask(self._umask)
else:
# INET socket
assert type(self._bindAddress) is tuple
assert len(self._bindAddress) == 2
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(self._bindAddress)
sock.listen(socket.SOMAXCONN)
if oldUmask is not None:
os.umask(oldUmask)
return sock
def _cleanupSocket(self, sock):
    """Closes the main socket (counterpart of `_setupSocket`)."""
    sock.close()
def _installSignalHandlers(self):
self._oldSIGs = [(x,signal.getsignal(x)) for x in
(signal.SIGHUP, signal.SIGINT, signal.SIGTERM)]
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/main.py | trac/trac/web/main.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Matthew Good <trac@matt-good.net>
import cgi
import dircache
import fnmatch
from functools import partial
import gc
import locale
import os
import pkg_resources
from pprint import pformat, pprint
import re
import sys
from genshi.builder import Fragment, tag
from genshi.output import DocType
from genshi.template import TemplateLoader
from trac import __version__ as TRAC_VERSION
from trac.config import BoolOption, ExtensionOption, Option, \
OrderedExtensionsOption
from trac.core import *
from trac.env import open_environment
from trac.loader import get_plugin_info, match_plugins_to_frames
from trac.perm import PermissionCache, PermissionError
from trac.resource import ResourceNotFound
from trac.util import arity, get_frame_info, get_last_traceback, hex_entropy, \
read_file, safe_repr, translation
from trac.util.concurrency import threading
from trac.util.datefmt import format_datetime, localtz, timezone, user_time
from trac.util.text import exception_to_unicode, shorten_line, to_unicode
from trac.util.translation import _, get_negotiated_locale, has_babel, \
safefmt, tag_
from trac.web.api import *
from trac.web.chrome import Chrome
from trac.web.href import Href
from trac.web.session import Session
#: This URL is used for semi-automatic bug reports (see
#: `send_internal_error`). Please modify it to point to your own
#: Trac instance if you distribute a patched version of Trac.
default_tracker = 'http://trac.edgewall.org'
class FakeSession(dict):
    """Stand-in used when the real session can't be retrieved.

    Behaves like a throw-away session: values are stored like in a
    plain `dict`, there is no session id, and `save()` is a no-op.
    """

    #: No backing session id.
    sid = None

    def save(self):
        """No-op: a fake session is never persisted."""
class FakePerm(dict):
    """Stand-in permission cache that grants nothing.

    `require` always reports failure, and scoping the cache by calling
    it (as in `req.perm(realm, id)`) hands back the same instance.
    """

    def require(self, *args):
        """Always report the permission as not granted."""
        granted = False
        return granted

    def __call__(self, *args):
        """Scoping a fake permission cache yields the cache itself."""
        return self
class RequestWithSession(Request):
    """A request that saves its associated session when sending the reply."""

    def send_response(self, code=200):
        """Persist the session, then delegate to `Request.send_response`.

        The session is only saved for non-error replies (status < 400).
        """
        is_error = code >= 400
        if not is_error:
            self.session.save()
        super(RequestWithSession, self).send_response(code)
class RequestDispatcher(Component):
    """Web request dispatcher.

    This component dispatches incoming requests to registered
    handlers. Besides, it also takes care of user authentication and
    request pre- and post-processing.
    """
    required = True

    authenticators = ExtensionPoint(IAuthenticator)
    handlers = ExtensionPoint(IRequestHandler)

    filters = OrderedExtensionsOption('trac', 'request_filters',
                                      IRequestFilter,
        doc="""Ordered list of filters to apply to all requests
            (''since 0.10'').""")

    default_handler = ExtensionOption('trac', 'default_handler',
                                      IRequestHandler, 'WikiModule',
        """Name of the component that handles requests to the base
        URL.
        Options include `TimelineModule`, `RoadmapModule`,
        `BrowserModule`, `QueryModule`, `ReportModule`, `TicketModule`
        and `WikiModule`. The default is `WikiModule`. (''since 0.9'')""")

    default_timezone = Option('trac', 'default_timezone', '',
        """The default timezone to use""")

    default_language = Option('trac', 'default_language', '',
        """The preferred language to use if no user preference has
        been set. (''since 0.12.1'')
        """)

    default_date_format = Option('trac', 'default_date_format', '',
        """The date format. Valid options are 'iso8601' for selecting
        ISO 8601 format, or leave it empty which means the default
        date format will be inferred from the browser's default
        language. (''since 1.0'')
        """)

    use_xsendfile = BoolOption('trac', 'use_xsendfile', 'false',
        """When true, send a `X-Sendfile` header and no content when sending
        files from the filesystem, so that the web server handles the content.
        This requires a web server that knows how to handle such a header,
        like Apache with `mod_xsendfile` or lighttpd. (''since 1.0'')
        """)

    # Public API

    def authenticate(self, req):
        """Return the authenticated user name for `req`.

        The first authenticator that recognizes the request wins;
        otherwise the request is 'anonymous'.
        """
        for authenticator in self.authenticators:
            authname = authenticator.authenticate(req)
            if authname:
                return authname
        else:
            # for/else: no authenticator claimed the request
            return 'anonymous'

    def dispatch(self, req):
        """Find a registered handler that matches the request and let
        it process it.

        In addition, this method initializes the data dictionary
        passed to the template and adds the web site chrome.
        """
        self.log.debug('Dispatching %r', req)
        chrome = Chrome(self.env)

        # Setup request callbacks for lazily-evaluated properties
        req.callbacks.update({
            'authname': self.authenticate,
            'chrome': chrome.prepare_request,
            'perm': self._get_perm,
            'session': self._get_session,
            'locale': self._get_locale,
            'lc_time': self._get_lc_time,
            'tz': self._get_timezone,
            'form_token': self._get_form_token,
            'use_xsendfile': self._get_use_xsendfile,
        })

        try:
            try:
                # Select the component that should handle the request
                chosen_handler = None
                try:
                    for handler in self.handlers:
                        if handler.match_request(req):
                            chosen_handler = handler
                            break
                    if not chosen_handler:
                        if not req.path_info or req.path_info == '/':
                            chosen_handler = self.default_handler
                    # pre-process any incoming request, whether a handler
                    # was found or not
                    chosen_handler = self._pre_process_request(req,
                                                               chosen_handler)
                except TracError, e:
                    raise HTTPInternalError(e)
                if not chosen_handler:
                    if req.path_info.endswith('/'):
                        # Strip trailing / and redirect
                        target = req.path_info.rstrip('/').encode('utf-8')
                        if req.query_string:
                            target += '?' + req.query_string
                        req.redirect(req.href + target, permanent=True)
                    raise HTTPNotFound('No handler matched request to %s',
                                       req.path_info)

                # Re-register the 'chrome' callback now that the handler
                # is known, so navigation items can be handler-specific.
                req.callbacks['chrome'] = partial(chrome.prepare_request,
                                                  handler=chosen_handler)

                # Protect against CSRF attacks: we validate the form token
                # for all POST requests with a content-type corresponding
                # to form submissions
                if req.method == 'POST':
                    ctype = req.get_header('Content-Type')
                    if ctype:
                        ctype, options = cgi.parse_header(ctype)
                    if ctype in ('application/x-www-form-urlencoded',
                                 'multipart/form-data') and \
                            req.args.get('__FORM_TOKEN') != req.form_token:
                        if self.env.secure_cookies and req.scheme == 'http':
                            msg = _('Secure cookies are enabled, you must '
                                    'use https to submit forms.')
                        else:
                            msg = _('Do you have cookies enabled?')
                        raise HTTPBadRequest(_('Missing or invalid form token.'
                                               ' %(msg)s', msg=msg))

                # Process the request and render the template
                resp = chosen_handler.process_request(req)
                if resp:
                    if len(resp) == 2: # old Clearsilver template and HDF data
                        self.log.error("Clearsilver template are no longer "
                                       "supported (%s)", resp[0])
                        raise TracError(
                            _("Clearsilver templates are no longer supported, "
                              "please contact your Trac administrator."))
                    # Genshi
                    template, data, content_type = \
                        self._post_process_request(req, *resp)
                    if 'hdfdump' in req.args:
                        req.perm.require('TRAC_ADMIN')
                        # debugging helper - no need to render first
                        out = StringIO()
                        pprint(data, out)
                        req.send(out.getvalue(), 'text/plain')
                    output = chrome.render_template(req, template, data,
                                                    content_type)
                    req.send(output, content_type or 'text/html')
                else:
                    self._post_process_request(req)
            except RequestDone:
                raise
            except:
                # post-process the request in case of errors
                err = sys.exc_info()
                try:
                    self._post_process_request(req)
                except RequestDone:
                    raise
                except Exception, e:
                    self.log.error("Exception caught while post-processing"
                                   " request: %s",
                                   exception_to_unicode(e, traceback=True))
                # re-raise the original exception with its traceback
                raise err[0], err[1], err[2]
        except PermissionError, e:
            raise HTTPForbidden(to_unicode(e))
        except ResourceNotFound, e:
            raise HTTPNotFound(e)
        except TracError, e:
            raise HTTPInternalError(e)

    # Internal methods

    def _get_perm(self, req):
        # No real session (see _get_session) means no real permissions.
        if isinstance(req.session, FakeSession):
            return FakePerm()
        else:
            return PermissionCache(self.env, self.authenticate(req))

    def _get_session(self, req):
        # Degrade to a FakeSession rather than failing the request when
        # the session store is unavailable.
        try:
            return Session(self.env, req)
        except TracError, e:
            self.log.error("can't retrieve session: %s",
                           exception_to_unicode(e))
            return FakeSession()

    def _get_locale(self, req):
        # Returns None implicitly when Babel isn't installed.
        if has_babel:
            preferred = req.session.get('language')
            default = self.env.config.get('trac', 'default_language', '')
            negotiated = get_negotiated_locale([preferred, default] +
                                               req.languages)
            self.log.debug("Negotiated locale: %s -> %s", preferred, negotiated)
            return negotiated

    def _get_lc_time(self, req):
        lc_time = req.session.get('lc_time')
        # Note the precedence: `not lc_time or (lc_time == 'locale' and
        # not has_babel)` -- 'locale' only falls back when Babel is missing.
        if not lc_time or lc_time == 'locale' and not has_babel:
            lc_time = self.default_date_format
        if lc_time == 'iso8601':
            return 'iso8601'
        return req.locale

    def _get_timezone(self, req):
        # 'missing' is deliberately not a valid timezone name, forcing
        # the localtz fallback when neither session nor default is set.
        try:
            return timezone(req.session.get('tz', self.default_timezone
                                            or 'missing'))
        except Exception:
            return localtz

    def _get_form_token(self, req):
        """Used to protect against CSRF.

        The 'form_token' is a strong shared secret stored in a user
        cookie. By requiring that every POST form contains this
        value we're able to protect against CSRF attacks, since this
        value is only known by the user and not by an attacker.

        If the user does not have a `trac_form_token` cookie a new
        one is generated.
        """
        if req.incookie.has_key('trac_form_token'):
            return req.incookie['trac_form_token'].value
        else:
            req.outcookie['trac_form_token'] = hex_entropy(24)
            req.outcookie['trac_form_token']['path'] = req.base_path or '/'
            if self.env.secure_cookies:
                req.outcookie['trac_form_token']['secure'] = True
            # The 'httponly' cookie attribute is only supported by the
            # Cookie module starting with Python 2.6.
            if sys.version_info >= (2, 6):
                req.outcookie['trac_form_token']['httponly'] = True
            return req.outcookie['trac_form_token'].value

    def _get_use_xsendfile(self, req):
        return self.use_xsendfile

    def _pre_process_request(self, req, chosen_handler):
        # Each filter may replace the handler selected so far.
        for filter_ in self.filters:
            chosen_handler = filter_.pre_process_request(req, chosen_handler)
        return chosen_handler

    def _post_process_request(self, req, *args):
        nbargs = len(args)
        resp = args
        # Filters are applied in reverse order on the way out.
        for f in reversed(self.filters):
            # As the arity of `post_process_request` has changed since
            # Trac 0.10, only filters with same arity gets passed real values.
            # Errors will call all filters with None arguments,
            # and results will not be not saved.
            extra_arg_count = arity(f.post_process_request) - 1
            if extra_arg_count == nbargs:
                resp = f.post_process_request(req, *resp)
            elif nbargs == 0:
                f.post_process_request(req, *(None,)*extra_arg_count)
        return resp
# Collapses runs of consecutive slashes; used below to undo mod_wsgi's
# PATH_INFO slash squashing when reconstructing SCRIPT_NAME.
_slashes_re = re.compile(r'/+')
def dispatch_request(environ, start_response):
    """Main entry point for the Trac web interface.

    :param environ: the WSGI environment dict
    :param start_response: the WSGI callback for starting the response
    """
    # SCRIPT_URL is an Apache var containing the URL before URL rewriting
    # has been applied, so we can use it to reconstruct logical SCRIPT_NAME
    script_url = environ.get('SCRIPT_URL')
    if script_url is not None:
        path_info = environ.get('PATH_INFO')
        if not path_info:
            environ['SCRIPT_NAME'] = script_url
        else:
            # mod_wsgi squashes slashes in PATH_INFO (!)
            script_url = _slashes_re.sub('/', script_url)
            path_info = _slashes_re.sub('/', path_info)
            if script_url.endswith(path_info):
                environ['SCRIPT_NAME'] = script_url[:-len(path_info)]

    # If the expected configuration keys aren't found in the WSGI environment,
    # try looking them up in the process environment variables
    environ.setdefault('trac.env_path', os.getenv('TRAC_ENV'))
    environ.setdefault('trac.env_parent_dir',
                       os.getenv('TRAC_ENV_PARENT_DIR'))
    environ.setdefault('trac.env_index_template',
                       os.getenv('TRAC_ENV_INDEX_TEMPLATE'))
    environ.setdefault('trac.template_vars',
                       os.getenv('TRAC_TEMPLATE_VARS'))
    environ.setdefault('trac.locale', '')
    environ.setdefault('trac.base_url',
                       os.getenv('TRAC_BASE_URL'))
    environ.setdefault('trac.bootstrap_handler',
                       os.getenv('TRAC_BOOTSTRAP_HANDLER'))

    locale.setlocale(locale.LC_ALL, environ['trac.locale'])

    # Load handler for environment lookup and instantiation of request objects
    from trac.hooks import load_bootstrap_handler
    bootstrap_ep = environ['trac.bootstrap_handler']
    bootstrap = load_bootstrap_handler(bootstrap_ep, environ.get('wsgi.errors'))

    # Determine the environment
    env = env_error = None
    try:
        env = bootstrap.open_environment(environ, start_response)
    except RequestDone:
        # The bootstrap handler already sent a complete response.
        return []
    except EnvironmentError, e:
        # A bare EnvironmentError is an OS-level problem and propagates;
        # subclasses (e.g. Trac environment errors) are reported nicely.
        if e.__class__ is EnvironmentError:
            raise
        else:
            env_error = e
    except Exception, e:
        env_error = e
    else:
        try:
            if env.base_url_for_redirect:
                environ['trac.base_url'] = env.base_url

            # Web front-end type and version information
            if not hasattr(env, 'webfrontend'):
                mod_wsgi_version = environ.get('mod_wsgi.version')
                if mod_wsgi_version:
                    mod_wsgi_version = (
                        "%s (WSGIProcessGroup %s WSGIApplicationGroup %s)" %
                        ('.'.join([str(x) for x in mod_wsgi_version]),
                         environ.get('mod_wsgi.process_group'),
                         environ.get('mod_wsgi.application_group') or
                         '%{GLOBAL}'))
                    environ.update({
                        'trac.web.frontend': 'mod_wsgi',
                        'trac.web.version': mod_wsgi_version})
                env.webfrontend = environ.get('trac.web.frontend')
                if env.webfrontend:
                    env.systeminfo.append((env.webfrontend,
                                           environ['trac.web.version']))
        except Exception, e:
            env_error = e

    run_once = environ['wsgi.run_once']

    req = None
    if env_error is None:
        try:
            req = bootstrap.create_request(env, environ, start_response) \
                  if env is not None else Request(environ, start_response)
        except Exception, e:
            log = environ.get('wsgi.errors')
            if log:
                log.write("[FAIL] [Trac] Entry point '%s' "
                          "Method 'create_request' Reason %s" %
                          (bootstrap_ep, repr(exception_to_unicode(e))))
    if req is None:
        # Fall back to the default request implementation.
        req = RequestWithSession(environ, start_response)
    translation.make_activable(lambda: req.locale, env.path if env else None)
    try:
        return _dispatch_request(req, env, env_error)
    finally:
        translation.deactivate()
        if env and not run_once:
            env.shutdown(threading._get_ident())
            # Now it's a good time to do some clean-ups
            #
            # Note: enable the '##' lines as soon as there's a suspicion
            # of memory leak due to uncollectable objects (typically
            # objects with a __del__ method caught in a cycle)
            #
            ##gc.set_debug(gc.DEBUG_UNCOLLECTABLE)
            unreachable = gc.collect()
            ##env.log.debug("%d unreachable objects found.", unreachable)
            ##uncollectable = len(gc.garbage)
            ##if uncollectable:
            ##    del gc.garbage[:]
            ##    env.log.warn("%d uncollectable objects found.", uncollectable)
def _dispatch_request(req, env, env_error):
    """Dispatch `req` through a `RequestDispatcher`, converting errors.

    `env_error` is a deferred environment-opening error; user-level
    HTTP errors and internal errors are rendered instead of propagated.
    Returns the WSGI response body iterable.
    """
    resp = []

    # fixup env.abs_href if `[trac] base_url` was not specified
    if env and not env.abs_href.base:
        env._abs_href = req.abs_href

    try:
        if not env and env_error:
            raise HTTPInternalError(env_error)
        try:
            dispatcher = RequestDispatcher(env)
            dispatcher.dispatch(req)
        except RequestDone:
            pass
        resp = req._response or []
    except HTTPException, e:
        # Expected, user-visible errors (403, 404, ...)
        _send_user_error(req, env, e)
    except Exception, e:
        # Anything else is an internal error with a full error page.
        send_internal_error(env, req, sys.exc_info())
    return resp
def _send_user_error(req, env, e):
    """Render an `HTTPException` `e` as a user-facing error page."""
    # See trac/web/api.py for the definition of HTTPException subclasses.
    if env:
        env.log.warn('[%s] %s' % (req.remote_addr, exception_to_unicode(e)))
    try:
        # We first try to get localized error messages here, but we
        # should ignore secondary errors if the main error was also
        # due to i18n issues
        title = _('Error')
        if e.reason:
            if title.lower() in e.reason.lower():
                title = e.reason
            else:
                title = _('Error: %(message)s', message=e.reason)
    except Exception:
        title = 'Error'
    # The message is based on the e.detail, which can be an Exception
    # object, but not a TracError one: when creating HTTPException,
    # a TracError.message is directly assigned to e.detail
    if isinstance(e.detail, Exception): # not a TracError
        message = exception_to_unicode(e.detail)
    elif isinstance(e.detail, Fragment): # markup coming from a TracError
        message = e.detail
    else:
        message = to_unicode(e.detail)
    data = {'title': title, 'type': 'TracError', 'message': message,
            'frames': [], 'traceback': None}
    if e.code == 403 and req.authname == 'anonymous':
        # TRANSLATOR: ... not logged in, you may want to 'do so' now (link)
        do_so = tag.a(_("do so"), href=req.href.login())
        req.chrome['notices'].append(
            tag_("You are currently not logged in. You may want to "
                 "%(do_so)s now.", do_so=do_so))
    try:
        req.send_error(sys.exc_info(), status=e.code, env=env, data=data)
    except RequestDone:
        pass
def send_internal_error(env, req, exc_info):
    """Render an "Internal Error" page for the exception in `exc_info`.

    For TRAC_ADMIN users the page includes traceback frames, plugin
    information and a pre-filled bug report, with the target tracker
    guessed from the plugin closest to the failing frame.
    """
    if env:
        env.log.error("Internal Server Error: %s",
                      exception_to_unicode(exc_info[1], traceback=True))
    message = exception_to_unicode(exc_info[1])
    traceback = get_last_traceback()

    frames, plugins, faulty_plugins = [], [], []
    th = 'http://trac-hacks.org'
    has_admin = False
    try:
        has_admin = 'TRAC_ADMIN' in req.perm
    except Exception:
        pass

    tracker = default_tracker
    # Skip the expensive introspection on MemoryError, when any further
    # allocation could fail as well.
    if has_admin and not isinstance(exc_info[1], MemoryError):
        # Collect frame and plugin information
        frames = get_frame_info(exc_info[2])
        if env:
            plugins = [p for p in get_plugin_info(env)
                       if any(c['enabled']
                              for m in p['modules'].itervalues()
                              for c in m['components'].itervalues())]
            match_plugins_to_frames(plugins, frames)

        # Identify the tracker where the bug should be reported
        faulty_plugins = [p for p in plugins if 'frame_idx' in p]
        faulty_plugins.sort(key=lambda p: p['frame_idx'])
        if faulty_plugins:
            info = faulty_plugins[0]['info']
            if 'trac' in info:
                tracker = info['trac']
            elif info.get('home_page', '').startswith(th):
                tracker = th

    def get_description(_):
        # Build the wiki-formatted bug report body; `_` is either the
        # real translation function or an identity-like formatter.
        if env and has_admin:
            sys_info = "".join("|| '''`%s`''' || `%s` ||\n"
                               % (k, v.replace('\n', '` [[br]] `'))
                               for k, v in env.get_systeminfo())
            sys_info += "|| '''`jQuery`''' || `#JQUERY#` ||\n"
            enabled_plugins = "".join("|| '''`%s`''' || `%s` ||\n"
                                      % (p['name'], p['version'] or _('N/A'))
                                      for p in plugins)
        else:
            sys_info = _("''System information not available''\n")
            enabled_plugins = _("''Plugin information not available''\n")
        return _("""\
==== How to Reproduce ====
While doing a %(method)s operation on `%(path_info)s`, Trac issued an internal error.
''(please provide additional details here)''
Request parameters:
{{{
%(req_args)s
}}}
User agent: `#USER_AGENT#`
==== System Information ====
%(sys_info)s
==== Enabled Plugins ====
%(enabled_plugins)s
==== Python Traceback ====
{{{
%(traceback)s}}}""",
                 method=req.method, path_info=req.path_info,
                 req_args=pformat(req.args), sys_info=sys_info,
                 enabled_plugins=enabled_plugins,
                 traceback=to_unicode(traceback))

    # Generate the description once in English, once in the current locale
    description_en = get_description(lambda s, **kw: safefmt(s, kw))
    try:
        description = get_description(_)
    except Exception:
        description = description_en

    data = {'title': 'Internal Error',
            'type': 'internal', 'message': message,
            'traceback': traceback, 'frames': frames,
            'shorten_line': shorten_line, 'repr': safe_repr,
            'plugins': plugins, 'faulty_plugins': faulty_plugins,
            'tracker': tracker,
            'description': description, 'description_en': description_en}

    try:
        req.send_error(exc_info, status=500, env=env, data=data)
    except RequestDone:
        pass
def send_project_index(environ, start_response, parent_dir=None,
                       env_paths=None):
    """Render the project index page listing all known environments.

    NOTE(review): `parent_dir` and `env_paths` are not used in this
    body (the environments come from `get_environments(environ)`);
    presumably kept for backward compatibility -- confirm with callers.
    """
    req = Request(environ, start_response)

    loadpaths = [pkg_resources.resource_filename('trac', 'templates')]
    if req.environ.get('trac.env_index_template'):
        # A custom index template overrides the bundled index.html.
        env_index_template = req.environ['trac.env_index_template']
        tmpl_path, template = os.path.split(env_index_template)
        loadpaths.insert(0, tmpl_path)
    else:
        template = 'index.html'

    data = {'trac': {'version': TRAC_VERSION,
                     'time': user_time(req, format_datetime)},
            'req': req}
    if req.environ.get('trac.template_vars'):
        # Extra 'key=value' pairs, comma-separated, injected as
        # template variables.
        for pair in req.environ['trac.template_vars'].split(','):
            key, val = pair.split('=')
            data[key] = val
    try:
        href = Href(req.base_path)
        projects = []
        for env_name, env_path in get_environments(environ).items():
            try:
                env = open_environment(env_path,
                                       use_cache=not environ['wsgi.run_once'])
                proj = {
                    'env': env,
                    'name': env.project_name,
                    'description': env.project_description,
                    'href': href(env_name)
                }
            except Exception, e:
                # Still list an unopenable project, with the error as
                # its description.
                proj = {'name': env_name, 'description': to_unicode(e)}
            projects.append(proj)
        projects.sort(lambda x, y: cmp(x['name'].lower(), y['name'].lower()))

        data['projects'] = projects

        loader = TemplateLoader(loadpaths, variable_lookup='lenient',
                                default_encoding='utf-8')
        tmpl = loader.load(template)
        stream = tmpl.generate(**data)
        if template.endswith('.xml'):
            output = stream.render('xml')
            req.send(output, 'text/xml')
        else:
            output = stream.render('xhtml', doctype=DocType.XHTML_STRICT,
                                   encoding='utf-8')
            req.send(output, 'text/html')
    except RequestDone:
        pass
def get_tracignore_patterns(env_parent_dir):
    """Return the list of patterns from env_parent_dir/.tracignore or
    a default pattern of `".*"` if the file doesn't exist.

    Blank lines and `#` comment lines are skipped.
    """
    tracignore = os.path.join(env_parent_dir, '.tracignore')
    try:
        content = read_file(tracignore)
    except IOError:
        # No .tracignore: ignore hidden directories by default.
        return ['.*']
    patterns = []
    for raw_line in content.splitlines():
        stripped = raw_line.strip()
        if stripped and not stripped.startswith('#'):
            patterns.append(stripped)
    return patterns
def get_environments(environ, warn=False):
    """Retrieve canonical environment name to path mapping.

    The environments may not be all valid environments, but they are
    good candidates.

    :param environ: WSGI environment providing `trac.env_paths` and/or
                    `trac.env_parent_dir`
    :param warn: when true, print a warning for duplicate project names
    """
    env_paths = environ.get('trac.env_paths', [])
    env_parent_dir = environ.get('trac.env_parent_dir')
    if env_parent_dir:
        env_parent_dir = os.path.normpath(env_parent_dir)
        paths = dircache.listdir(env_parent_dir)[:]
        # dircache.annotate appends a '/' to directory entries, which is
        # what the `path[-1] == '/'` test below relies on.
        dircache.annotate(env_parent_dir, paths)

        # Filter paths that match the .tracignore patterns
        ignore_patterns = get_tracignore_patterns(env_parent_dir)
        paths = [path[:-1] for path in paths if path[-1] == '/'
                 and not any(fnmatch.fnmatch(path[:-1], pattern)
                             for pattern in ignore_patterns)]
        env_paths.extend(os.path.join(env_parent_dir, project) \
                         for project in paths)
    envs = {}
    for env_path in env_paths:
        env_path = os.path.normpath(env_path)
        if not os.path.isdir(env_path):
            continue
        env_name = os.path.split(env_path)[1]
        if env_name in envs:
            # First occurrence of a project name wins.
            if warn:
                print >> sys.stderr, ('Warning: Ignoring project "%s" since '
                                      'it conflicts with project "%s"'
                                      % (env_path, envs[env_name]))
        else:
            envs[env_name] = env_path
    return envs
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/standalone.py | trac/trac/web/standalone.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005-2006 Matthew Good <trac@matt-good.net>
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Matthew Good <trac@matt-good.net>
# Christopher Lenz <cmlenz@gmx.de>
import pkg_resources
import os
import socket
import select
import sys
from SocketServer import ThreadingMixIn
from trac import __version__ as VERSION
from trac.hooks import load_bootstrap_handler
from trac.util import autoreload, daemon
from trac.web.auth import BasicAuthentication, DigestAuthentication
from trac.web.main import dispatch_request
from trac.web.wsgi import WSGIServer, WSGIRequestHandler
class AuthenticationMiddleware(object):
    """WSGI middleware enforcing tracd's `--auth`/`--basic-auth` options.

    Requests whose path addresses a `login` URL are authenticated
    against the object registered for the project (or the wildcard
    project `'*'`); on success `REMOTE_USER` is set for the wrapped
    application, on failure the challenge response is returned.
    """

    def __init__(self, application, auths, single_env_name=None):
        self.application = application
        self.auths = auths
        self.single_env_name = single_env_name
        # Single project: the URL is `/login`; otherwise the project
        # name comes first, as in `/<project>/login`.
        self.part = 0 if single_env_name else 1

    def __call__(self, environ, start_response):
        path_info = environ.get('PATH_INFO', '')
        path_parts = filter(None, path_info.split('/'))
        is_login = (len(path_parts) > self.part
                    and path_parts[self.part] == 'login')
        if is_login:
            env_name = self.single_env_name or path_parts[0]
            if env_name:
                auth = self.auths.get(env_name, self.auths.get('*'))
                if auth:
                    remote_user = auth.do_auth(environ, start_response)
                    if not remote_user:
                        # Challenge already sent by do_auth.
                        return []
                    environ['REMOTE_USER'] = remote_user
        return self.application(environ, start_response)
class BootstrapAuthenticationMiddleware(AuthenticationMiddleware):
    """Authentication middleware for custom web bootstrap handlers.

    Instead of deriving the project name from the URL, the configured
    bootstrap handler probes the request and announces the project via
    the `trac.env_name` key of the WSGI environment.
    """

    def __call__(self, environ, start_response):
        bootstrap_ep = os.getenv('TRAC_BOOTSTRAP_HANDLER')
        environ.setdefault('trac.bootstrap_handler', bootstrap_ep)
        # Probe a copy so the real environ reaches the application
        # unchanged by the probing step.
        temp_environ = environ.copy()
        bootstrap = load_bootstrap_handler(bootstrap_ep)
        bootstrap.probe_environment(temp_environ)

        path_parts = filter(None,
                            temp_environ.get('PATH_INFO', '').split('/'))
        env_name = temp_environ.get('trac.env_name')
        if env_name and path_parts and path_parts[0] == 'login':
            auth = self.auths.get(env_name, self.auths.get('*'))
            if auth:
                remote_user = auth.do_auth(environ, start_response)
                if not remote_user:
                    # Challenge already sent by do_auth.
                    return []
                environ['REMOTE_USER'] = remote_user
        return self.application(environ, start_response)
class BasePathMiddleware(object):
    """WSGI middleware moving a fixed URL prefix out of `PATH_INFO`.

    The `base_path` prefix becomes `SCRIPT_NAME`, so the wrapped
    application only ever sees the portion of the URL below it.
    """

    def __init__(self, application, base_path):
        self.base_path = '/' + base_path.strip('/')
        self.application = application

    def __call__(self, environ, start_response):
        full_path = environ['SCRIPT_NAME'] + environ.get('PATH_INFO', '')
        # Everything after the prefix is the application's path.
        environ['PATH_INFO'] = full_path[len(self.base_path):]
        environ['SCRIPT_NAME'] = self.base_path
        return self.application(environ, start_response)
class TracEnvironMiddleware(object):
    """WSGI middleware seeding Trac's configuration keys in the environ.

    Depending on how tracd was invoked, either `trac.env_parent_dir`,
    `trac.env_path` (single environment) or `trac.env_paths` is filled
    in; values already present in the per-request environ win.
    """

    def __init__(self, application, env_parent_dir, env_paths, single_env):
        self.application = application
        defaults = {'trac.env_path': None}
        if env_parent_dir:
            defaults['trac.env_parent_dir'] = env_parent_dir
        elif single_env:
            defaults['trac.env_path'] = env_paths[0]
        else:
            defaults['trac.env_paths'] = env_paths
        self.environ = defaults

    def __call__(self, environ, start_response):
        for k, v in self.environ.iteritems():
            environ.setdefault(k, v)
        return self.application(environ, start_response)
class TracHTTPServer(ThreadingMixIn, WSGIServer):
    """Threaded HTTP server used by tracd's built-in `http` protocol."""

    # Don't block process exit on in-flight request threads.
    daemon_threads = True

    def __init__(self, server_address, application, env_parent_dir, env_paths,
                 use_http_11=False):
        # NOTE(review): env_parent_dir and env_paths are accepted but not
        # used here -- presumably kept for interface compatibility with
        # callers; confirm before removing.
        request_handlers = (TracHTTPRequestHandler, TracHTTP11RequestHandler)
        WSGIServer.__init__(self, server_address, application,
                            request_handler=request_handlers[bool(use_http_11)])

    if sys.version_info < (2, 6):
        # Backport of the Python 2.6 polling serve_forever() so that the
        # server remains interruptible on older interpreters.
        def serve_forever(self, poll_interval=0.5):
            while True:
                r, w, e = select.select([self], [], [], poll_interval)
                if self in r:
                    self.handle_request()
class TracHTTPRequestHandler(WSGIRequestHandler):
    """Request handler announcing itself as tracd (HTTP/1.0)."""

    # Reported in the Server response header.
    server_version = 'tracd/' + VERSION

    def address_string(self):
        # Disable reverse name lookups: return the raw client IP
        # instead of resolving it to a host name.
        return self.client_address[:2][0]
class TracHTTP11RequestHandler(TracHTTPRequestHandler):
    """Same as `TracHTTPRequestHandler`, but speaking HTTP/1.1
    (enables persistent connections)."""
    protocol_version = 'HTTP/1.1'
def main():
from optparse import OptionParser, OptionValueError
parser = OptionParser(usage='usage: %prog [options] [projenv] ...',
version='%%prog %s' % VERSION)
auths = {}
def _auth_callback(option, opt_str, value, parser, cls):
info = value.split(',', 3)
if len(info) != 3:
raise OptionValueError("Incorrect number of parameters for %s"
% option)
env_name, filename, realm = info
if env_name in auths:
print >> sys.stderr, 'Ignoring duplicate authentication option ' \
'for project: %s' % env_name
else:
auths[env_name] = cls(os.path.abspath(filename), realm)
def _validate_callback(option, opt_str, value, parser, valid_values):
if value not in valid_values:
raise OptionValueError('%s must be one of: %s, not %s'
% (opt_str, '|'.join(valid_values), value))
setattr(parser.values, option.dest, value)
def _octal(option, opt_str, value, parser):
try:
setattr(parser.values, option.dest, int(value, 8))
except ValueError:
raise OptionValueError('Invalid octal umask value: %r' % value)
parser.add_option('-a', '--auth', action='callback', type='string',
metavar='DIGESTAUTH', callback=_auth_callback,
callback_args=(DigestAuthentication,),
help='[projectdir],[htdigest_file],[realm]')
parser.add_option('--basic-auth', action='callback', type='string',
metavar='BASICAUTH', callback=_auth_callback,
callback_args=(BasicAuthentication,),
help='[projectdir],[htpasswd_file],[realm]')
parser.add_option('-p', '--port', action='store', type='int', dest='port',
help='the port number to bind to')
parser.add_option('-b', '--hostname', action='store', dest='hostname',
help='the host name or IP address to bind to')
parser.add_option('--protocol', action='callback', type="string",
dest='protocol', callback=_validate_callback,
callback_args=(('http', 'scgi', 'ajp', 'fcgi'),),
help='http|scgi|ajp|fcgi')
parser.add_option('-q', '--unquote', action='store_true',
dest='unquote',
help='unquote PATH_INFO (may be needed when using ajp)')
parser.add_option('--http10', action='store_false', dest='http11',
help='use HTTP/1.0 protocol version instead of HTTP/1.1')
parser.add_option('--http11', action='store_true', dest='http11',
help='use HTTP/1.1 protocol version (default)')
parser.add_option('-e', '--env-parent-dir', action='store',
dest='env_parent_dir', metavar='PARENTDIR',
help='parent directory of the project environments')
parser.add_option('--base-path', action='store', type='string', # XXX call this url_base_path?
dest='base_path',
help='the initial portion of the request URL\'s "path"')
parser.add_option('-r', '--auto-reload', action='store_true',
dest='autoreload',
help='restart automatically when sources are modified')
parser.add_option('-s', '--single-env', action='store_true',
dest='single_env', help='only serve a single '
'project without the project list', default=False)
if os.name == 'posix':
parser.add_option('-d', '--daemonize', action='store_true',
dest='daemonize',
help='run in the background as a daemon')
parser.add_option('--pidfile', action='store',
dest='pidfile',
help='when daemonizing, file to which to write pid')
parser.add_option('--umask', action='callback', type='string',
dest='umask', metavar='MASK', callback=_octal,
help='when daemonizing, file mode creation mask '
'to use, in octal notation (default 022)')
try:
import grp, pwd
def _group(option, opt_str, value, parser):
try:
value = int(value)
except ValueError:
try:
value = grp.getgrnam(value)[2]
except KeyError:
raise OptionValueError('group not found: %r' % value)
setattr(parser.values, option.dest, value)
def _user(option, opt_str, value, parser):
try:
value = int(value)
except ValueError:
try:
value = pwd.getpwnam(value)[2]
except KeyError:
raise OptionValueError('user not found: %r' % value)
setattr(parser.values, option.dest, value)
parser.add_option('--group', action='callback', type='string',
dest='group', metavar='GROUP', callback=_group,
help='the group to run as')
parser.add_option('--user', action='callback', type='string',
dest='user', metavar='USER', callback=_user,
help='the user to run as')
except ImportError:
pass
parser.set_defaults(port=None, hostname='', base_path='', daemonize=False,
protocol='http', http11=True, umask=022, user=None,
group=None)
options, args = parser.parse_args()
if not args and not options.env_parent_dir:
parser.error('either the --env-parent-dir option or at least one '
'environment must be specified')
if options.single_env:
if options.env_parent_dir:
parser.error('the --single-env option cannot be used with '
'--env-parent-dir')
elif len(args) > 1:
parser.error('the --single-env option cannot be used with '
'more than one enviroment')
if options.daemonize and options.autoreload:
parser.error('the --auto-reload option cannot be used with '
'--daemonize')
if options.port is None:
options.port = {
'http': 80,
'scgi': 4000,
'ajp': 8009,
'fcgi': 8000,
}[options.protocol]
server_address = (options.hostname, options.port)
# relative paths don't work when daemonized
args = [os.path.abspath(a) for a in args]
if options.env_parent_dir:
options.env_parent_dir = os.path.abspath(options.env_parent_dir)
if parser.has_option('pidfile') and options.pidfile:
options.pidfile = os.path.abspath(options.pidfile)
wsgi_app = dispatch_request
if auths:
if options.single_env:
project_name = os.path.basename(args[0])
wsgi_app = BootstrapAuthenticationMiddleware(wsgi_app, auths, project_name)
else:
wsgi_app = BootstrapAuthenticationMiddleware(wsgi_app, auths)
wsgi_app = TracEnvironMiddleware(wsgi_app,
options.env_parent_dir, args,
options.single_env)
base_path = options.base_path.strip('/')
if base_path:
wsgi_app = BasePathMiddleware(wsgi_app, base_path)
if options.protocol == 'http':
def serve():
addr, port = server_address
if not addr or addr == '0.0.0.0':
loc = '0.0.0.0:%s view at http://127.0.0.1:%s/%s' \
% (port, port, base_path)
else:
loc = 'http://%s:%s/%s' % (addr, port, base_path)
try:
httpd = TracHTTPServer(server_address, wsgi_app,
options.env_parent_dir, args,
use_http_11=options.http11)
except socket.error, e:
print 'Error starting Trac server on %s' % loc
print '[Errno %s] %s' % e.args
sys.exit(1)
print 'Server starting in PID %i.' % os.getpid()
print 'Serving on %s' % loc
if options.http11:
print 'Using HTTP/1.1 protocol version'
httpd.serve_forever()
elif options.protocol in ('scgi', 'ajp', 'fcgi'):
def serve():
server_cls = __import__('flup.server.%s' % options.protocol,
None, None, ['']).WSGIServer
flup_app = wsgi_app
if options.unquote:
from trac.web.fcgi_frontend import FlupMiddleware
flup_app = FlupMiddleware(flup_app)
ret = server_cls(flup_app, bindAddress=server_address).run()
sys.exit(42 if ret else 0) # if SIGHUP exit with status 42
try:
if options.daemonize:
daemon.daemonize(pidfile=options.pidfile, progname='tracd',
umask=options.umask)
if options.group is not None:
os.setgid(options.group)
if options.user is not None:
os.setuid(options.user)
if options.autoreload:
def modification_callback(file):
print >> sys.stderr, 'Detected modification of %s, ' \
'restarting.' % file
autoreload.main(serve, modification_callback)
else:
serve()
except OSError, e:
print >> sys.stderr, '%s: %s' % (e.__class__.__name__, e)
sys.exit(1)
except KeyboardInterrupt:
pass
if __name__ == '__main__':
    # Fail fast if the installed Trac distribution does not match the
    # version this script was shipped with, then start the standalone
    # tracd server.
    pkg_resources.require('Trac==%s' % VERSION)
    main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/chrome.py | trac/trac/web/chrome.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2010 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
"""Content presentation for the web layer.
The Chrome module deals with delivering and shaping content to the end user,
mostly targeting (X)HTML generation but not exclusively, RSS or other forms of
web content are also using facilities provided here.
"""
from __future__ import with_statement
import datetime
from functools import partial
import itertools
import os.path
import pkg_resources
import pprint
import re
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from genshi import Markup
from genshi.builder import tag, Element
from genshi.core import Attrs, START
from genshi.filters import Translator
from genshi.output import DocType
from genshi.template import TemplateLoader, MarkupTemplate, NewTextTemplate
from trac import __version__ as VERSION
from trac.config import *
from trac.core import *
from trac.env import IEnvironmentSetupParticipant, ISystemInfoProvider
from trac.mimeview.api import RenderingContext, get_mimetype
from trac.resource import *
from trac.util import compat, get_reporter_id, presentation, get_pkginfo, \
pathjoin, translation
from trac.util.html import escape, plaintext
from trac.util.text import pretty_size, obfuscate_email_address, \
shorten_line, unicode_quote_plus, to_unicode, \
javascript_quote, exception_to_unicode
from trac.util.datefmt import (
pretty_timedelta, format_datetime, format_date, format_time,
from_utimestamp, http_date, utc, get_date_format_jquery_ui, is_24_hours,
get_time_format_jquery_ui, user_time, get_month_names_jquery_ui,
get_day_names_jquery_ui, get_timezone_list_jquery_ui,
get_first_week_day_jquery_ui)
from trac.util.translation import _, get_available_locales
from trac.web.api import IRequestHandler, ITemplateStreamFilter, HTTPNotFound
from trac.web.href import Href
from trac.wiki import IWikiSyntaxProvider
from trac.wiki.formatter import format_to, format_to_html, format_to_oneliner
class INavigationContributor(Interface):
    """Extension point interface for components that contribute items to the
    navigation.

    Categories correspond to the navigation bars, e.g. `mainnav` and
    `metanav` (see the `[trac] mainnav` / `[trac] metanav` options).
    """

    def get_active_navigation_item(req):
        """This method is only called for the `IRequestHandler` processing the
        request.

        It should return the name of the navigation item that should be
        highlighted as active/current.
        """

    def get_navigation_items(req):
        """Should return an iterable object over the list of navigation items
        to add, each being a tuple in the form (category, name, text).

        `text` is either plain text or a markup element (typically an `<a>`
        element wrapping the label).
        """
class ITemplateProvider(Interface):
    """Extension point interface for components that provide their own
    Genshi templates and accompanying static resources.
    """

    def get_htdocs_dirs():
        """Return a list of directories with static resources (such as style
        sheets, images, etc.)

        Each item in the list must be a `(prefix, abspath)` tuple. The
        `prefix` part defines the path in the URL that requests to these
        resources are prefixed with (i.e. they are served below
        `/chrome/<prefix>/`).

        The `abspath` is the absolute path to the directory containing the
        resources on the local file system.
        """

    def get_templates_dirs():
        """Return a list of directories containing the provided template
        files.
        """
def add_meta(req, content, http_equiv=None, name=None, scheme=None, lang=None):
    """Register a `<meta>` tag to be rendered in the `<head>` of the page.

    The tag attributes mirror the keyword arguments; `lang` is emitted
    both as `lang` and as `xml:lang`.
    """
    metas = req.chrome.setdefault('metas', [])
    metas.append({
        'content': content,
        'http-equiv': http_equiv,
        'name': name,
        'scheme': scheme,
        'lang': lang,
        'xml:lang': lang,
    })
def add_link(req, rel, href, title=None, mimetype=None, classname=None,
             **attrs):
    """Register a `<link>` element for the `<head>` of the generated HTML.

    A duplicate `(rel, href)` pair is silently ignored, so the same link
    may be requested several times during one request. Extra keyword
    arguments become additional link attributes and override the named
    ones on key collision.
    """
    key = '%s:%s' % (rel, href)
    seen = req.chrome.setdefault('linkset', set())
    if key in seen:
        return  # already registered during this request
    entry = {'href': href, 'title': title, 'type': mimetype,
             'class': classname}
    entry.update(attrs)
    req.chrome.setdefault('links', {}).setdefault(rel, []).append(entry)
    seen.add(key)
def add_stylesheet(req, filename, mimetype='text/css', media=None):
    """Add a link to a style sheet to the chrome info so that it gets
    included in the generated HTML page.

    Fully qualified URLs are used as-is; `common/...` resources are mapped
    to the configured htdocs location when one is set. Otherwise, an
    absolute filename (starting with a slash) resolves against the
    application root and a relative one against the `/chrome/` path.
    """
    if filename.startswith(('http://', 'https://')):
        href = filename
    elif filename.startswith('common/') and 'htdocs_location' in req.chrome:
        base = Href(req.chrome['htdocs_location'])
        href = base(filename[len('common/'):])
    else:
        base = req.href if filename.startswith('/') else req.href.chrome
        href = base(filename)
    add_link(req, 'stylesheet', href, mimetype=mimetype, media=media)
def add_script(req, filename, mimetype='text/javascript', charset='utf-8',
               ie_if=None):
    """Add a reference to an external javascript file to the template.

    The link resolution follows the same rules as `add_stylesheet`. When
    `ie_if` is given, the `<script>` tag is wrapped in an Internet
    Explorer conditional comment. Returns `False` when the script was
    already registered for this request.
    """
    scriptset = req.chrome.setdefault('scriptset', set())
    if filename in scriptset:
        return False  # Already added that script
    if filename.startswith(('http://', 'https://')):
        href = filename
    elif filename.startswith('common/') and 'htdocs_location' in req.chrome:
        href = Href(req.chrome['htdocs_location'])(filename[len('common/'):])
    else:
        base = req.href if filename.startswith('/') else req.href.chrome
        href = base(filename)
    prefix = suffix = None
    if ie_if:
        prefix = Markup('<!--[if %s]>' % ie_if)
        suffix = Markup('<![endif]-->')
    req.chrome.setdefault('scripts', []).append({
        'href': href, 'type': mimetype, 'charset': charset,
        'prefix': prefix, 'suffix': suffix,
    })
    scriptset.add(filename)
def add_script_data(req, data={}, **kwargs):
    """Expose values to client-side scripts as global javascript variables.

    The keys in `data` and the keyword argument names provide the names of
    the global variables. The values are converted to JSON and assigned to
    the corresponding variables when the page is rendered.
    """
    store = req.chrome.setdefault('script_data', {})
    for mapping in (data, kwargs):
        store.update(mapping)
def add_javascript(req, filename):
    """Backward-compatible alias for `add_script`.

    :deprecated: use `add_script` instead.
    """
    add_script(req, filename, 'text/javascript')
def add_warning(req, msg, *args):
    """Add a non-fatal warning to the request object.

    All warnings are rendered to the user when the page is generated. The
    message is %-interpolated with `args` (if any) and escaped (and
    therefore converted to `Markup`) before being stored; duplicates are
    dropped.
    """
    text = msg % args if args else msg
    text = escape(text, False)
    warnings = req.chrome['warnings']
    if text not in warnings:
        warnings.append(text)
def add_notice(req, msg, *args):
    """Add an informational notice to the request object.

    All notices are rendered to the user when the page is generated. The
    message is %-interpolated with `args` (if any) and escaped (and
    therefore converted to `Markup`) before being stored; duplicates are
    dropped.
    """
    text = msg % args if args else msg
    text = escape(text, False)
    notices = req.chrome['notices']
    if text not in notices:
        notices.append(text)
def add_ctxtnav(req, elm_or_label, href=None, title=None):
    """Add an entry to the current page's ctxtnav bar.

    If `href` is given, `elm_or_label` is wrapped in an `<a>` element;
    otherwise it is appended verbatim.
    """
    if href:
        entry = tag.a(elm_or_label, href=href, title=title)
    else:
        entry = elm_or_label
    req.chrome.setdefault('ctxtnav', []).append(entry)
def prevnext_nav(req, prev_label, next_label, up_label=None):
    """Add Previous/Up/Next navigation links.

    :param req: a `Request` object
    :param prev_label: the label to use for left (previous) link
    :param next_label: the label to use for right (next) link
    :param up_label: the label to use for the middle (up) link

    The links themselves are taken from the `prev`, `up` and `next`
    entries of `req.chrome['links']`; a label without a corresponding
    link is still rendered, but marked with the `missing` class.
    """
    links = req.chrome['links']
    prev_link = next_link = None
    if not any(lnk in links for lnk in ('prev', 'up', 'next')): # Short circuit
        return
    if 'prev' in links:
        prev = links['prev'][0]
        prev_link = tag.a(prev_label, href=prev['href'], title=prev['title'],
                          class_='prev')
    # The previous entry is always rendered, even when there is no link
    add_ctxtnav(req, tag.span(Markup('← '), prev_link or prev_label,
                              class_='missing' if not prev_link else None))
    # The up entry is only rendered when both a label and a link exist
    if up_label and 'up' in links:
        up = links['up'][0]
        add_ctxtnav(req, tag.a(up_label, href=up['href'], title=up['title']))
    if 'next' in links:
        next_ = links['next'][0]
        next_link = tag.a(next_label, href=next_['href'], title=next_['title'],
                          class_='next')
    add_ctxtnav(req, tag.span(next_link or next_label, Markup(' →'),
                              class_='missing' if not next_link else None))
def web_context(req, resource=None, id=False, version=False, parent=False,
                absurls=False):
    """Create a rendering context from a request.

    The `perm` and `href` properties of the context are taken over from
    the corresponding properties of the request object, when one is given.

    >>> from trac.test import Mock, MockPerm
    >>> req = Mock(href=Mock(), perm=MockPerm())
    >>> context = web_context(req)
    >>> context.href is req.href
    True
    >>> context.perm is req.perm
    True

    :param req: the HTTP request object
    :param resource: the `Resource` object or realm
    :param id: the resource identifier
    :param version: the resource version
    :param parent: the parent resource, if any
    :param absurls: whether URLs generated by the ``href`` object should
                    be absolute (including the protocol scheme and host
                    name)
    :return: a new rendering context
    :rtype: `RenderingContext`
    """
    href = perm = None
    if req:
        href = req.abs_href if absurls else req.href
        perm = req.perm
    context = RenderingContext(Resource(resource, id=id, version=version,
                                        parent=parent), href=href, perm=perm)
    context.req = req
    return context
def auth_link(req, link):
    """Return an "authenticated" link to `link` for authenticated users.

    Anonymous users get `link` back unchanged; any other user gets a
    `/login` link that redirects to `link` after authentication.
    """
    if req.authname == 'anonymous':
        return link
    return req.href.login(referer=link)
def _save_messages(req, url, permanent):
    """Redirect listener: persist pending warnings and notices in the
    session so they can still be displayed after the redirect."""
    for kind in ('warnings', 'notices'):
        for idx, message in enumerate(req.chrome[kind]):
            key = 'chrome.%s.%d' % (kind, idx)
            req.session[key] = escape(message, False)
# Mappings for removal of control characters
_translate_nop = "".join([chr(i) for i in range(256)])
_invalid_control_chars = "".join([chr(i) for i in range(32)
if i not in [0x09, 0x0a, 0x0d]])
class Chrome(Component):
"""Web site chrome assembly manager.
Chrome is everything that is not actual page content.
"""
required = True
implements(ISystemInfoProvider, IEnvironmentSetupParticipant,
IRequestHandler, ITemplateProvider, IWikiSyntaxProvider)
navigation_contributors = ExtensionPoint(INavigationContributor)
template_providers = ExtensionPoint(ITemplateProvider)
stream_filters = ExtensionPoint(ITemplateStreamFilter)
shared_templates_dir = PathOption('inherit', 'templates_dir', '',
"""Path to the //shared templates directory//.
Templates in that directory are loaded in addition to those in the
environments `templates` directory, but the latter take precedence.
(''since 0.11'')""")
shared_htdocs_dir = PathOption('inherit', 'htdocs_dir', '',
"""Path to the //shared htdocs directory//.
Static resources in that directory are mapped to /chrome/shared
under the environment URL, in addition to common and site locations.
This can be useful in site.html for common interface customization
of multiple Trac environments.
(''since 1.0'')""")
auto_reload = BoolOption('trac', 'auto_reload', False,
"""Automatically reload template files after modification.""")
genshi_cache_size = IntOption('trac', 'genshi_cache_size', 128,
"""The maximum number of templates that the template loader will cache
in memory. The default value is 128. You may want to choose a higher
value if your site uses a larger number of templates, and you have
enough memory to spare, or you can reduce it if you are short on
memory.""")
htdocs_location = Option('trac', 'htdocs_location', '',
"""Base URL for serving the core static resources below
`/chrome/common/`.
It can be left empty, and Trac will simply serve those resources
itself.
Advanced users can use this together with
[TracAdmin trac-admin ... deploy <deploydir>] to allow serving the
static resources for Trac directly from the web server.
Note however that this only applies to the `<deploydir>/htdocs/common`
directory, the other deployed resources (i.e. those from plugins)
will not be made available this way and additional rewrite
rules will be needed in the web server.""")
jquery_location = Option('trac', 'jquery_location', '',
"""Location of the jQuery !JavaScript library (version 1.7.2).
An empty value loads jQuery from the copy bundled with Trac.
Alternatively, jQuery could be loaded from a CDN, for example:
http://code.jquery.com/jquery-1.7.2.min.js,
http://ajax.aspnetcdn.com/ajax/jQuery/jquery-1.7.2.min.js or
https://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js.
(''since 1.0'')""")
jquery_ui_location = Option('trac', 'jquery_ui_location', '',
"""Location of the jQuery UI !JavaScript library (version 1.8.21).
An empty value loads jQuery UI from the copy bundled with Trac.
Alternatively, jQuery UI could be loaded from a CDN, for example:
https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.21/jquery-ui.min.js
or
http://ajax.aspnetcdn.com/ajax/jquery.ui/1.8.21/jquery-ui.min.js.
(''since 1.0'')""")
jquery_ui_theme_location = Option('trac', 'jquery_ui_theme_location', '',
"""Location of the theme to be used with the jQuery UI !JavaScript
library (version 1.8.21).
An empty value loads the custom Trac jQuery UI theme from the copy
bundled with Trac.
Alternatively, a jQuery UI theme could be loaded from a CDN, for
example:
https://ajax.googleapis.com/ajax/libs/jqueryui/1.8.21/themes/start/jquery-ui.css
or
http://ajax.aspnetcdn.com/ajax/jquery.ui/1.8.21/themes/start/jquery-ui.css.
(''since 1.0'')""")
metanav_order = ListOption('trac', 'metanav',
'login, logout, prefs, help, about', doc=
"""Order of the items to display in the `metanav` navigation bar,
listed by IDs. See also TracNavigation.""")
mainnav_order = ListOption('trac', 'mainnav',
'wiki, timeline, roadmap, browser, tickets, '
'newticket, search', doc=
"""Order of the items to display in the `mainnav` navigation bar,
listed by IDs. See also TracNavigation.""")
logo_link = Option('header_logo', 'link', '',
"""URL to link to, from the header logo.""")
logo_src = Option('header_logo', 'src', 'site/your_project_logo.png',
"""URL of the image to use as header logo.
It can be absolute, server relative or relative.
If relative, it is relative to one of the `/chrome` locations:
`site/your-logo.png` if `your-logo.png` is located in the `htdocs`
folder within your TracEnvironment;
`common/your-logo.png` if `your-logo.png` is located in the
folder mapped to the [#trac-section htdocs_location] URL.
Only specifying `your-logo.png` is equivalent to the latter.""")
logo_alt = Option('header_logo', 'alt',
"(please configure the [header_logo] section in trac.ini)",
"""Alternative text for the header logo.""")
logo_width = IntOption('header_logo', 'width', -1,
"""Width of the header logo image in pixels.""")
logo_height = IntOption('header_logo', 'height', -1,
"""Height of the header logo image in pixels.""")
show_email_addresses = BoolOption('trac', 'show_email_addresses', 'false',
"""Show email addresses instead of usernames. If false, we obfuscate
email addresses. (''since 0.11'')""")
never_obfuscate_mailto = BoolOption('trac', 'never_obfuscate_mailto',
'false',
"""Never obfuscate `mailto:` links explicitly written in the wiki,
even if `show_email_addresses` is false or the user has not the
EMAIL_VIEW permission (''since 0.11.6'').""")
show_ip_addresses = BoolOption('trac', 'show_ip_addresses', 'false',
"""Show IP addresses for resource edits (e.g. wiki).
(''since 0.11.3'')""")
resizable_textareas = BoolOption('trac', 'resizable_textareas', 'true',
"""Make `<textarea>` fields resizable. Requires !JavaScript.
(''since 0.12'')""")
auto_preview_timeout = FloatOption('trac', 'auto_preview_timeout', 2.0,
"""Inactivity timeout in seconds after which the automatic wiki preview
triggers an update. This option can contain floating-point values. The
lower the setting, the more requests will be made to the server. Set
this to 0 to disable automatic preview. The default is 2.0 seconds.
(''since 0.12'')""")
default_dateinfo_format = Option('trac', 'default_dateinfo_format',
'relative',
"""The date information format. Valid options are 'relative' for
displaying relative format and 'absolute' for displaying absolute
format. (''since 1.0'')
""")
templates = None
# default doctype for 'text/html' output
default_html_doctype = DocType.XHTML_STRICT
# A dictionary of default context data for templates
_default_context_data = {
'_': translation.gettext,
'all': all,
'any': any,
'classes': presentation.classes,
'date': datetime.date,
'datetime': datetime.datetime,
'dgettext': translation.dgettext,
'dngettext': translation.dngettext,
'first_last': presentation.first_last,
'get_reporter_id': get_reporter_id,
'gettext': translation.gettext,
'group': presentation.group,
'groupby': compat.py_groupby, # http://bugs.python.org/issue2246
'http_date': http_date,
'istext': presentation.istext,
'javascript_quote': javascript_quote,
'ngettext': translation.ngettext,
'paginate': presentation.paginate,
'partial': partial,
'pathjoin': pathjoin,
'plaintext': plaintext,
'pprint': pprint.pformat,
'pretty_size': pretty_size,
'pretty_timedelta': pretty_timedelta,
'quote_plus': unicode_quote_plus,
'reversed': reversed,
'separated': presentation.separated,
'shorten_line': shorten_line,
'sorted': sorted,
'time': datetime.time,
'timedelta': datetime.timedelta,
'to_json': presentation.to_json,
'to_unicode': to_unicode,
'utc': utc,
}
# ISystemInfoProvider methods
def get_system_info(self):
import genshi
info = get_pkginfo(genshi).get('version')
if hasattr(genshi, '_speedups'):
info += ' (with speedups)'
else:
info += ' (without speedups)'
yield 'Genshi', info
try:
import babel
except ImportError:
babel = None
if babel is not None:
info = get_pkginfo(babel).get('version')
if not get_available_locales():
info += " (translations unavailable)" # No i18n on purpose
self.log.warning("Locale data is missing")
yield 'Babel', info
# IEnvironmentSetupParticipant methods
    def environment_created(self):
        """Create the environment templates directory.

        Also drops a commented `site.html.sample` file in it, which admins
        can rename to `site.html` to customize the site appearance.
        """
        if self.env.path:
            templates_dir = os.path.join(self.env.path, 'templates')
            if not os.path.exists(templates_dir):
                os.mkdir(templates_dir)
            site_path = os.path.join(templates_dir, 'site.html.sample')
            with open(site_path, 'w') as fileobj:
                fileobj.write("""\
<html xmlns="http://www.w3.org/1999/xhtml"
xmlns:xi="http://www.w3.org/2001/XInclude"
xmlns:py="http://genshi.edgewall.org/"
py:strip="">
<!--!
This file allows customizing the appearance of the Trac installation.
Add your customizations here and rename the file to site.html. Note that
it will take precedence over a global site.html placed in the directory
specified by [inherit] templates_dir.
More information about site appearance customization can be found here:
http://trac.edgewall.org/wiki/TracInterfaceCustomization#SiteAppearance
-->
</html>
""")
    def environment_needs_upgrade(self, db):
        """`IEnvironmentSetupParticipant`: Chrome stores no schema, so no
        upgrade is ever needed."""
        return False
    def upgrade_environment(self, db):
        """`IEnvironmentSetupParticipant`: nothing to upgrade for Chrome."""
        pass
# IRequestHandler methods
def match_request(self, req):
match = re.match(r'/chrome/(?P<prefix>[^/]+)/+(?P<filename>.+)',
req.path_info)
if match:
req.args['prefix'] = match.group('prefix')
req.args['filename'] = match.group('filename')
return True
def process_request(self, req):
prefix = req.args['prefix']
filename = req.args['filename']
dirs = []
for provider in self.template_providers:
for dir in [os.path.normpath(dir[1]) for dir
in provider.get_htdocs_dirs() or []
if dir[0] == prefix]:
dirs.append(dir)
path = os.path.normpath(os.path.join(dir, filename))
assert os.path.commonprefix([dir, path]) == dir
if os.path.isfile(path):
req.send_file(path, get_mimetype(path))
self.log.warning('File %s not found in any of %s', filename, dirs)
raise HTTPNotFound('File %s not found', filename)
# ITemplateProvider methods
def get_htdocs_dirs(self):
return [('common', pkg_resources.resource_filename('trac', 'htdocs')),
('shared', self.shared_htdocs_dir),
('site', self.env.get_htdocs_dir())]
def get_templates_dirs(self):
return filter(None, [
self.env.get_templates_dir(),
self.shared_templates_dir,
pkg_resources.resource_filename('trac', 'templates'),
])
# IWikiSyntaxProvider methods
    def get_wiki_syntax(self):
        """`IWikiSyntaxProvider`: no additional wiki syntax is
        contributed."""
        return []
    def get_link_resolvers(self):
        """`IWikiSyntaxProvider`: resolve `htdocs:...` wiki links (see
        `_format_link`)."""
        yield ('htdocs', self._format_link)
def _format_link(self, formatter, ns, file, label):
file, query, fragment = formatter.split_link(file)
href = formatter.href.chrome('site', file) + query + fragment
return tag.a(label, href=href)
# Public API methods
def get_all_templates_dirs(self):
"""Return a list of the names of all known templates directories."""
dirs = []
for provider in self.template_providers:
dirs.extend(provider.get_templates_dirs() or [])
return dirs
    def prepare_request(self, req, handler=None):
        """Prepare the basic chrome data for the request.

        :param req: the request object
        :param handler: the `IRequestHandler` instance that is processing the
                        request
        :return: the chrome dictionary, which is also assigned to
                 `req.chrome`
        """
        self.log.debug('Prepare chrome data for request')
        chrome = {'metas': [], 'links': {}, 'scripts': [], 'script_data': {},
                  'ctxtnav': [], 'warnings': [], 'notices': []}
        req.chrome = chrome
        htdocs_location = self.htdocs_location or req.href.chrome('common')
        chrome['htdocs_location'] = htdocs_location.rstrip('/') + '/'
        # HTML <head> links
        add_link(req, 'start', req.href.wiki())
        add_link(req, 'search', req.href.search())
        add_link(req, 'help', req.href.wiki('TracGuide'))
        add_stylesheet(req, 'common/css/trac.css')
        add_script(req, self.jquery_location or 'common/js/jquery.js')
        # Only activate noConflict mode if requested to by the handler
        if handler is not None and \
                getattr(handler.__class__, 'jquery_noconflict', False):
            add_script(req, 'common/js/noconflict.js')
        add_script(req, 'common/js/babel.js')
        if req.locale is not None:
            add_script(req, 'common/js/messages/%s.js' % req.locale)
        add_script(req, 'common/js/trac.js')
        add_script(req, 'common/js/search.js')
        # Shortcut icon
        chrome['icon'] = self.get_icon_data(req)
        if chrome['icon']:
            src = chrome['icon']['src']
            mimetype = chrome['icon']['mimetype']
            add_link(req, 'icon', src, mimetype=mimetype)
            add_link(req, 'shortcut icon', src, mimetype=mimetype)
        # Logo image
        chrome['logo'] = self.get_logo_data(req.href, req.abs_href)
        # Navigation links: collect items from every contributor, honoring
        # per-item enable/label/href overrides from the configuration
        allitems = {}
        active = None
        for contributor in self.navigation_contributors:
            try:
                for category, name, text in \
                        contributor.get_navigation_items(req) or []:
                    category_section = self.config[category]
                    if category_section.getbool(name, True):
                        # the navigation item is enabled (this is the default)
                        item = None
                        if isinstance(text, Element) and \
                                text.tag.localname == 'a':
                            item = text
                        label = category_section.get(name + '.label')
                        href = category_section.get(name + '.href')
                        if href:
                            if href.startswith('/'):
                                href = req.href + href
                            if label:
                                item = tag.a(label) # create new label
                            elif not item:
                                item = tag.a(text) # wrap old text
                            item = item(href=href) # use new href
                        elif label and item: # create new label, use old href
                            item = tag.a(label, href=item.attrib.get('href'))
                        elif not item: # use old text
                            item = text
                        allitems.setdefault(category, {})[name] = item
                if contributor is handler:
                    active = contributor.get_active_navigation_item(req)
            except Exception, e:
                # A broken contributor must not take down the whole page;
                # log the failure and surface it as a warning instead.
                name = contributor.__class__.__name__
                if isinstance(e, TracError):
                    self.log.warning("Error with navigation contributor %s",
                                     name)
                else:
                    self.log.error("Error with navigation contributor %s: %s",
                                   name, exception_to_unicode(e))
                add_warning(req, _("Error with navigation contributor "
                                   '"%(name)s"', name=name))
        # Order the items within each category according to the matching
        # `<category>_order` option (e.g. `mainnav_order`), when defined
        nav = {}
        for category, items in [(k, v.items()) for k, v in allitems.items()]:
            category_order = category + '_order'
            if hasattr(self, category_order):
                order = getattr(self, category_order)
                def navcmp(x, y):
                    # items missing from `order` sort after those listed
                    if x[0] not in order:
                        return int(y[0] in order)
                    if y[0] not in order:
                        return -int(x[0] in order)
                    return cmp(order.index(x[0]), order.index(y[0]))
                items.sort(navcmp)
            nav[category] = []
            for name, label in items:
                nav[category].append({
                    'name': name,
                    'label': label,
                    'active': name == active
                })
        chrome['nav'] = nav
        # Default theme file
        chrome['theme'] = 'theme.html'
        # Avoid recursion by registering as late as possible (#8583)
        req.add_redirect_listener(_save_messages)
        return chrome
def get_icon_data(self, req):
icon = {}
icon_src = icon_abs_src = self.env.project_icon
if icon_src:
if not icon_src.startswith('/') and icon_src.find('://') == -1:
if '/' in icon_src:
icon_abs_src = req.abs_href.chrome(icon_src)
icon_src = req.href.chrome(icon_src)
else:
icon_abs_src = req.abs_href.chrome('common', icon_src)
icon_src = req.href.chrome('common', icon_src)
mimetype = get_mimetype(icon_src)
icon = {'src': icon_src, 'abs_src': icon_abs_src,
'mimetype': mimetype}
return icon
def get_logo_data(self, href, abs_href=None):
# TODO: Possibly, links to 'common/' could use chrome.htdocs_location
logo = {}
logo_src = self.logo_src
if logo_src:
abs_href = abs_href or href
if logo_src.startswith(('http://', 'https://', '/')):
# Nothing further can be calculated
logo_src_abs = logo_src
elif '/' in logo_src:
# Like 'common/trac_banner.png' or 'site/my_banner.png'
logo_src_abs = abs_href.chrome(logo_src)
logo_src = href.chrome(logo_src)
else:
# Like 'trac_banner.png'
logo_src_abs = abs_href.chrome('common', logo_src)
logo_src = href.chrome('common', logo_src)
width = self.logo_width if self.logo_width > -1 else None
height = self.logo_height if self.logo_height > -1 else None
logo = {
'link': self.logo_link, 'src': logo_src,
'src_abs': logo_src_abs, 'alt': self.logo_alt,
'width': width, 'height': height
}
else:
logo = {'link': self.logo_link, 'alt': self.logo_alt}
return logo
def populate_data(self, req, data):
d = self._default_context_data.copy()
d['trac'] = {
'version': pkg_resources.resource_string('trac', 'TRAC_VERSION').strip(),
'homepage': 'http://trac.edgewall.org/', # FIXME: use setup data
}
href = req and req.href
abs_href = req.abs_href if req else self.env.abs_href
admin_href = None
if self.env.project_admin_trac_url == '.':
admin_href = href
elif self.env.project_admin_trac_url:
admin_href = Href(self.env.project_admin_trac_url)
d['project'] = {
'name': self.env.project_name,
'descr': self.env.project_description,
'url': self.env.project_url,
'admin': self.env.project_admin,
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/__init__.py | trac/trac/web/__init__.py | # Workaround for http://bugs.python.org/issue6763 and
# http://bugs.python.org/issue5853 thread issues
import mimetypes
mimetypes.init()
# With mod_python we'll have to delay importing trac.web.api until
# modpython_frontend.handler() has been called since the
# PYTHON_EGG_CACHE variable is set from there
#
# TODO: Remove this once the Genshi zip_safe issue has been resolved.
import os
from pkg_resources import get_distribution
if not os.path.isdir(get_distribution('genshi').location):
    # Genshi is installed as a zipped egg: only import the API once the
    # mod_python handler had a chance to set PYTHON_EGG_CACHE (see above).
    try:
        import mod_python.apache
        import sys
        if 'trac.web.modpython_frontend' in sys.modules:
            from trac.web.api import *
    except ImportError:
        # Not running under mod_python: safe to import right away
        from trac.web.api import *
else:
    from trac.web.api import *
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/session.py | trac/trac/web/session.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004 Daniel Lundin <daniel@edgewall.com>
# Copyright (C) 2004-2006 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2006 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2008 Matt Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Daniel Lundin <daniel@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
from __future__ import with_statement
import sys
import time
from trac.admin.api import console_date_format
from trac.core import TracError, Component, implements
from trac.util import hex_entropy
from trac.util.text import print_table
from trac.util.translation import _
from trac.util.datefmt import format_date, parse_date, to_datetime, \
to_timestamp
from trac.admin.api import IAdminCommandProvider, AdminCommandError
# Update the session last_visit time stamp only after one day has elapsed,
# to limit write traffic on the session table.
UPDATE_INTERVAL = 24 * 60 * 60
# Sessions idle for more than 90 days are purged.
PURGE_AGE = 90 * 24 * 60 * 60
# Name of the cookie carrying the session id.
COOKIE_KEY = 'trac_session'
# Note: as we often manipulate both the `session` and the
# `session_attribute` tables, there's a possibility of table
# deadlocks (#9705). We try to prevent them to happen by always
# accessing the tables in the same order within the transaction,
# first `session`, then `session_attribute`.
class DetachedSession(dict):
def __init__(self, env, sid):
dict.__init__(self)
self.env = env
self.sid = None
if sid:
self.get_session(sid, authenticated=True)
else:
self.authenticated = False
self.last_visit = 0
self._new = True
self._old = {}
    def __setitem__(self, key, value):
        # All session values are coerced to unicode strings before storage.
        dict.__setitem__(self, key, unicode(value))
def set(self, key, value, default=None):
"""Set a variable in the session, or remove it if it's equal to the
default value.
"""
value = unicode(value)
if default is not None:
default = unicode(default)
if value == default:
self.pop(key, None)
return
dict.__setitem__(self, key, value)
def get_session(self, sid, authenticated=False):
self.env.log.debug("Retrieving session for ID %r", sid)
with self.env.db_query as db:
self.sid = sid
self.authenticated = authenticated
self.clear()
for last_visit, in db("""
SELECT last_visit FROM session
WHERE sid=%s AND authenticated=%s
""", (sid, int(authenticated))):
self._new = False
self.last_visit = int(last_visit or 0)
self.update(db("""
SELECT name, value FROM session_attribute
WHERE sid=%s and authenticated=%s
""", (sid, int(authenticated))))
self._old = self.copy()
break
else:
self.last_visit = 0
self._new = True
self._old = {}
def save(self):
items = self.items()
if not self._old and not items:
# The session doesn't have associated data, so there's no need to
# persist it
return
authenticated = int(self.authenticated)
now = int(time.time())
# We can't do the session management in one big transaction,
# as the intertwined changes to both the session and
# session_attribute tables are prone to deadlocks (#9705).
# Therefore we first we save the current session, then we
# eventually purge the tables.
session_saved = False
with self.env.db_transaction as db:
# Try to save the session if it's a new one. A failure to
# do so is not critical but we nevertheless skip the
# following steps.
if self._new:
self.last_visit = now
self._new = False
# The session might already exist even if _new is True since
# it could have been created by a concurrent request (#3563).
try:
db("""INSERT INTO session (sid, last_visit, authenticated)
VALUES (%s,%s,%s)
""", (self.sid, self.last_visit, authenticated))
except self.env.db_exc.IntegrityError:
self.env.log.warning('Session %s already exists', self.sid)
db.rollback()
return
# Remove former values for session_attribute and save the
# new ones. The last concurrent request to do so "wins".
if self._old != self:
if not items and not authenticated:
# No need to keep around empty unauthenticated sessions
db("DELETE FROM session WHERE sid=%s AND authenticated=0",
(self.sid,))
db("""DELETE FROM session_attribute
WHERE sid=%s AND authenticated=%s
""", (self.sid, authenticated))
self._old = dict(self.items())
# The session variables might already have been updated by a
# concurrent request.
try:
db.executemany("""
INSERT INTO session_attribute
(sid,authenticated,name,value)
VALUES (%s,%s,%s,%s)
""", [(self.sid, authenticated, k, v)
for k, v in items])
except self.env.db_exc.IntegrityError:
self.env.log.warning('Attributes for session %s already '
'updated', self.sid)
db.rollback()
return
session_saved = True
# Purge expired sessions. We do this only when the session was
# changed as to minimize the purging.
if session_saved and now - self.last_visit > UPDATE_INTERVAL:
self.last_visit = now
mintime = now - PURGE_AGE
with self.env.db_transaction as db:
# Update the session last visit time if it is over an
# hour old, so that session doesn't get purged
self.env.log.info("Refreshing session %s", self.sid)
db("""UPDATE session SET last_visit=%s
WHERE sid=%s AND authenticated=%s
""", (self.last_visit, self.sid, authenticated))
self.env.log.debug('Purging old, expired, sessions.')
db("""DELETE FROM session_attribute
WHERE authenticated=0 AND sid IN (
SELECT sid FROM session
WHERE authenticated=0 AND last_visit < %s
)
""", (mintime,))
# Avoid holding locks on lot of rows on both session_attribute
# and session tables
with self.env.db_transaction as db:
db("""
DELETE FROM session
WHERE authenticated=0 AND last_visit < %s
""", (mintime,))
class Session(DetachedSession):
    """Basic session handling and per-session storage."""
    def __init__(self, env, req):
        super(Session, self).__init__(env, None)
        self.req = req
        if req.authname == 'anonymous':
            # Anonymous users are tracked through a random sid stored in
            # the session cookie; mint one on the very first visit.
            if not req.incookie.has_key(COOKIE_KEY):
                self.sid = hex_entropy(24)
                self.bake_cookie()
            else:
                sid = req.incookie[COOKIE_KEY].value
                self.get_session(sid)
        else:
            # Authenticated users use their user name as sid; a leftover
            # anonymous session in the cookie is merged in first.
            if req.incookie.has_key(COOKIE_KEY):
                sid = req.incookie[COOKIE_KEY].value
                self.promote_session(sid)
            self.get_session(req.authname, authenticated=True)
    def bake_cookie(self, expires=PURGE_AGE):
        """Send the session cookie, valid for `expires` seconds."""
        assert self.sid, 'Session ID not set'
        self.req.outcookie[COOKIE_KEY] = self.sid
        self.req.outcookie[COOKIE_KEY]['path'] = self.req.base_path or '/'
        self.req.outcookie[COOKIE_KEY]['expires'] = expires
        if self.env.secure_cookies:
            self.req.outcookie[COOKIE_KEY]['secure'] = True
        if sys.version_info >= (2, 6):
            # The Cookie module only knows 'httponly' from Python 2.6 on
            self.req.outcookie[COOKIE_KEY]['httponly'] = True
    def get_session(self, sid, authenticated=False):
        """Load the session data, re-sending the cookie when appropriate."""
        refresh_cookie = False
        if self.sid and sid != self.sid:
            refresh_cookie = True
        super(Session, self).get_session(sid, authenticated)
        if self.last_visit and time.time() - self.last_visit > UPDATE_INTERVAL:
            refresh_cookie = True
        # Refresh the session cookie if this is the first visit after a day
        if not authenticated and refresh_cookie:
            self.bake_cookie()
    def change_sid(self, new_sid):
        """Rename the current anonymous session to `new_sid`.

        Raises `TracError` when a session with that ID already exists.
        """
        assert self.req.authname == 'anonymous', \
               'Cannot change ID of authenticated session'
        assert new_sid, 'Session ID cannot be empty'
        if new_sid == self.sid:
            return
        with self.env.db_transaction as db:
            if db("SELECT sid FROM session WHERE sid=%s", (new_sid,)):
                raise TracError(_("Session '%(id)s' already exists. "
                                  "Please choose a different session ID.",
                                  id=new_sid),
                                _("Error renaming session"))
            self.env.log.debug("Changing session ID %s to %s", self.sid,
                               new_sid)
            db("UPDATE session SET sid=%s WHERE sid=%s AND authenticated=0",
               (new_sid, self.sid))
            db("""UPDATE session_attribute SET sid=%s
                  WHERE sid=%s and authenticated=0
                  """, (new_sid, self.sid))
        self.sid = new_sid
        self.bake_cookie()
    def promote_session(self, sid):
        """Promotes an anonymous session to an authenticated session, if there
        is no preexisting session data for that user name.
        """
        assert self.req.authname != 'anonymous', \
               "Cannot promote session of anonymous user"
        with self.env.db_transaction as db:
            authenticated_flags = [authenticated for authenticated, in db(
                "SELECT authenticated FROM session WHERE sid=%s OR sid=%s",
                (sid, self.req.authname))]
            if len(authenticated_flags) == 2:
                # There's already an authenticated session for the user,
                # we simply delete the anonymous session
                db("DELETE FROM session WHERE sid=%s AND authenticated=0",
                   (sid,))
                db("""DELETE FROM session_attribute
                      WHERE sid=%s AND authenticated=0
                      """, (sid,))
            elif len(authenticated_flags) == 1:
                if not authenticated_flags[0]:
                    # Update the anonymous session records so the session ID
                    # becomes the user name, and set the authenticated flag.
                    self.env.log.debug("Promoting anonymous session %s to "
                                       "authenticated session for user %s",
                                       sid, self.req.authname)
                    db("""UPDATE session SET sid=%s, authenticated=1
                          WHERE sid=%s AND authenticated=0
                          """, (self.req.authname, sid))
                    db("""UPDATE session_attribute SET sid=%s, authenticated=1
                          WHERE sid=%s
                          """, (self.req.authname, sid))
            else:
                # We didn't have an anonymous session for this sid. The
                # authenticated session might have been inserted between the
                # SELECT above and here, so we catch the error.
                try:
                    db("""INSERT INTO session (sid, last_visit, authenticated)
                          VALUES (%s, %s, 1)
                          """, (self.req.authname, int(time.time())))
                except self.env.db_exc.IntegrityError:
                    self.env.log.warning('Authenticated session for %s '
                                         'already exists', self.req.authname)
                    db.rollback()
        self._new = False
        self.sid = sid
        self.bake_cookie(0) # expire the cookie
class SessionAdmin(Component):
    """trac-admin command provider for session management"""
    implements(IAdminCommandProvider)
    # IAdminCommandProvider methods
    def get_admin_commands(self):
        yield ('session list', '[sid[:0|1]] [...]',
               """List the name and email for the given sids
               Specifying the sid 'anonymous' lists all unauthenticated
               sessions, and 'authenticated' all authenticated sessions.
               '*' lists all sessions, and is the default if no sids are
               given.
               An sid suffix ':0' operates on an unauthenticated session with
               the given sid, and a suffix ':1' on an authenticated session
               (the default).""",
               self._complete_list, self._do_list)
        yield ('session add', '<sid[:0|1]> [name] [email]',
               """Create a session for the given sid
               Populates the name and email attributes for the given session.
               Adding a suffix ':0' to the sid makes the session
               unauthenticated, and a suffix ':1' makes it authenticated (the
               default if no suffix is specified).""",
               None, self._do_add)
        yield ('session set', '<name|email> <sid[:0|1]> <value>',
               """Set the name or email attribute of the given sid
               An sid suffix ':0' operates on an unauthenticated session with
               the given sid, and a suffix ':1' on an authenticated session
               (the default).""",
               self._complete_set, self._do_set)
        yield ('session delete', '<sid[:0|1]> [...]',
               """Delete the session of the specified sid
               An sid suffix ':0' operates on an unauthenticated session with
               the given sid, and a suffix ':1' on an authenticated session
               (the default). Specifying the sid 'anonymous' will delete all
               anonymous sessions.""",
               self._complete_delete, self._do_delete)
        yield ('session purge', '<age>',
               """Purge all anonymous sessions older than the given age
               Age may be specified as a relative time like "90 days ago", or
               in YYYYMMDD format.""",
               None, self._do_purge)
    def _split_sid(self, sid):
        # Split an "sid[:0|1]" argument into an (sid, authenticated) pair;
        # authenticated (1) is the default when no suffix is given.
        if sid.endswith(':0'):
            return (sid[:-2], 0)
        elif sid.endswith(':1'):
            return (sid[:-2], 1)
        else:
            return (sid, 1)
    def _get_sids(self):
        # All known sessions, encoded as "sid:authenticated" strings for
        # shell tab-completion.
        rows = self.env.db_query("SELECT sid, authenticated FROM session")
        return ['%s:%d' % (sid, auth) for sid, auth in rows]
    def _get_list(self, sids):
        """Yield (sid, authenticated, last_visit, name, email) tuples for
        the sessions selected by the given sid arguments.
        """
        all_anon = 'anonymous' in sids or '*' in sids
        all_auth = 'authenticated' in sids or '*' in sids
        sids = set(self._split_sid(sid) for sid in sids
                   if sid not in ('anonymous', 'authenticated', '*'))
        rows = self.env.db_query("""
            SELECT DISTINCT s.sid, s.authenticated, s.last_visit,
                            n.value, e.value
            FROM session AS s
              LEFT JOIN session_attribute AS n
                ON (n.sid=s.sid AND n.authenticated=s.authenticated
                    AND n.name='name')
              LEFT JOIN session_attribute AS e
                ON (e.sid=s.sid AND e.authenticated=s.authenticated
                    AND e.name='email')
            ORDER BY s.sid, s.authenticated
            """)
        for sid, authenticated, last_visit, name, email in rows:
            if all_anon and not authenticated or all_auth and authenticated \
                    or (sid, authenticated) in sids:
                yield (sid, authenticated, last_visit, name, email)
    def _complete_list(self, args):
        # Tab-completion for `session list`
        all_sids = self._get_sids() + ['*', 'anonymous', 'authenticated']
        return set(all_sids) - set(args)
    def _complete_set(self, args):
        # Tab-completion for `session set`
        if len(args) == 1:
            return ['name', 'email']
        elif len(args) == 2:
            return self._get_sids()
    def _complete_delete(self, args):
        # Tab-completion for `session delete`
        all_sids = self._get_sids() + ['anonymous']
        return set(all_sids) - set(args)
    def _do_list(self, *sids):
        # Default to listing every session when no sid argument is given
        if not sids:
            sids = ['*']
        print_table([(r[0], r[1], format_date(to_datetime(r[2]),
                                              console_date_format),
                      r[3], r[4])
                     for r in self._get_list(sids)],
                    [_('SID'), _('Auth'), _('Last Visit'), _('Name'),
                     _('Email')])
    def _do_add(self, sid, name=None, email=None):
        sid, authenticated = self._split_sid(sid)
        with self.env.db_transaction as db:
            try:
                # NOTE(review): relies on the session table's column order
                # being (sid, authenticated, last_visit) -- cf. the explicit
                # column list used in DetachedSession.save(); confirm against
                # the schema.
                db("INSERT INTO session VALUES (%s, %s, %s)",
                   (sid, authenticated, int(time.time())))
            except Exception:
                raise AdminCommandError(_("Session '%(sid)s' already exists",
                                          sid=sid))
            if name is not None:
                db("INSERT INTO session_attribute VALUES (%s,%s,'name',%s)",
                   (sid, authenticated, name))
            if email is not None:
                db("INSERT INTO session_attribute VALUES (%s,%s,'email',%s)",
                   (sid, authenticated, email))
    def _do_set(self, attr, sid, val):
        if attr not in ('name', 'email'):
            raise AdminCommandError(_("Invalid attribute '%(attr)s'",
                                      attr=attr))
        sid, authenticated = self._split_sid(sid)
        with self.env.db_transaction as db:
            if not db("""SELECT sid FROM session
                         WHERE sid=%s AND authenticated=%s""",
                      (sid, authenticated)):
                raise AdminCommandError(_("Session '%(sid)s' not found",
                                          sid=sid))
            # Replace any existing value by delete-then-insert
            db("""
                DELETE FROM session_attribute
                WHERE sid=%s AND authenticated=%s AND name=%s
                """, (sid, authenticated, attr))
            db("INSERT INTO session_attribute VALUES (%s, %s, %s, %s)",
               (sid, authenticated, attr, val))
    def _do_delete(self, *sids):
        with self.env.db_transaction as db:
            for sid in sids:
                sid, authenticated = self._split_sid(sid)
                if sid == 'anonymous':
                    # Wipe every unauthenticated session at once
                    db("DELETE FROM session WHERE authenticated=0")
                    db("DELETE FROM session_attribute WHERE authenticated=0")
                else:
                    db("""
                        DELETE FROM session
                        WHERE sid=%s AND authenticated=%s
                        """, (sid, authenticated))
                    db("""
                        DELETE FROM session_attribute
                        WHERE sid=%s AND authenticated=%s
                        """, (sid, authenticated))
    def _do_purge(self, age):
        when = parse_date(age)
        with self.env.db_transaction as db:
            ts = to_timestamp(when)
            db("""
                DELETE FROM session
                WHERE authenticated=0 AND last_visit<%s
                """, (ts,))
            # Drop the attributes orphaned by the DELETE above
            db("""
                DELETE FROM session_attribute
                WHERE authenticated=0
                  AND sid NOT IN (SELECT sid FROM session
                                  WHERE authenticated=0)
                """)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/cgi_frontend.py | trac/trac/web/cgi_frontend.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C)2005-2009 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Matthew Good <trac@matt-good.net>
import os
import pkg_resources
import sys
from trac import __version__ as VERSION
from trac.web.main import dispatch_request
from trac.web.wsgi import WSGIGateway
class CGIGateway(WSGIGateway):
    """WSGI gateway that writes the response to stdout, CGI-style.

    A CGI process serves exactly one request, single-threaded and
    single-process, which is reflected in the wsgi_* flags.
    """
    wsgi_multithread = False
    wsgi_multiprocess = False
    wsgi_run_once = True
    def __init__(self):
        WSGIGateway.__init__(self, dict(os.environ))
    def _write(self, data):
        assert self.headers_set, 'Response not started'
        out = sys.stdout
        if not self.headers_sent:
            # First body write: emit the CGI status line and all headers,
            # terminated by an empty line, before any payload.
            status, headers = self.headers_sent = self.headers_set
            header_block = ['Status: %s\r\n' % status]
            header_block.extend('%s: %s\r\n' % pair for pair in headers)
            header_block.append('\r\n')
            out.write(''.join(header_block))
            out.flush()
        out.write(data)
        out.flush()
def run():
    """Serve a single request as a CGI script via the Trac dispatcher."""
    # Make FreeBSD use blocking I/O like other platforms
    try:
        import fcntl
        for stream in (sys.stdin, sys.stdout):
            descriptor = stream.fileno()
            current = fcntl.fcntl(descriptor, fcntl.F_GETFL)
            fcntl.fcntl(descriptor, fcntl.F_SETFL, current & ~os.O_NONBLOCK)
    except (ImportError, AttributeError):
        pass
    # Use binary I/O on Windows
    try:
        import msvcrt
        msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    except ImportError:
        pass
    CGIGateway().run(dispatch_request)
if __name__ == '__main__':
    # Make sure the installed Trac distribution matches this module's
    # version before serving the request.
    pkg_resources.require('Trac==%s' % VERSION)
    run()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/href.py | trac/trac/web/href.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2004 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christopher Lenz <cmlenz@gmx.de>
import re
from trac.util.text import unicode_quote, unicode_urlencode
slashes_re = re.compile(r'/{2,}')
class Href(object):
    """Implements a callable that constructs URLs with the given base. The
    function can be called with any number of positional and keyword
    arguments which then are used to assemble the URL.
    Positional arguments are appended as individual segments to
    the path of the URL:
    >>> href = Href('/trac')
    >>> href('ticket', 540)
    '/trac/ticket/540'
    >>> href('ticket', 540, 'attachment', 'bugfix.patch')
    '/trac/ticket/540/attachment/bugfix.patch'
    >>> href('ticket', '540/attachment/bugfix.patch')
    '/trac/ticket/540/attachment/bugfix.patch'
    If a positional parameter evaluates to None, it will be skipped:
    >>> href('ticket', 540, 'attachment', None)
    '/trac/ticket/540/attachment'
    The first path segment can also be specified by calling an attribute
    of the instance, as follows:
    >>> href.ticket(540)
    '/trac/ticket/540'
    >>> href.changeset(42, format='diff')
    '/trac/changeset/42?format=diff'
    Simply calling the Href object with no arguments will return the base URL:
    >>> href()
    '/trac'
    Keyword arguments are added to the query string, unless the value is None:
    >>> href = Href('/trac')
    >>> href('timeline', format='rss')
    '/trac/timeline?format=rss'
    >>> href('timeline', format=None)
    '/trac/timeline'
    >>> href('search', q='foo bar')
    '/trac/search?q=foo+bar'
    Multiple values for one parameter are specified using a sequence (a list or
    tuple) for the parameter:
    >>> href('timeline', show=['ticket', 'wiki', 'changeset'])
    '/trac/timeline?show=ticket&show=wiki&show=changeset'
    Alternatively, query string parameters can be added by passing a dict or
    list as last positional argument:
    >>> href('timeline', {'from': '02/24/05', 'daysback': 30})
    '/trac/timeline?daysback=30&from=02%2F24%2F05'
    >>> href('timeline', {})
    '/trac/timeline'
    >>> href('timeline', [('from', '02/24/05')])
    '/trac/timeline?from=02%2F24%2F05'
    >>> href('timeline', ()) == href('timeline', []) == href('timeline', {})
    True
    The usual way of quoting arguments that would otherwise be interpreted
    as Python keywords is supported too:
    >>> href('timeline', from_='02/24/05', daysback=30)
    '/trac/timeline?from=02%2F24%2F05&daysback=30'
    If the order of query string parameters should be preserved, you may also
    pass a sequence of (name, value) tuples as last positional argument:
    >>> href('query', (('group', 'component'), ('groupdesc', 1)))
    '/trac/query?group=component&groupdesc=1'
    >>> params = []
    >>> params.append(('group', 'component'))
    >>> params.append(('groupdesc', 1))
    >>> href('query', params)
    '/trac/query?group=component&groupdesc=1'
    By specifying an absolute base, the function returned will also generate
    absolute URLs:
    >>> href = Href('http://trac.edgewall.org')
    >>> href('ticket', 540)
    'http://trac.edgewall.org/ticket/540'
    >>> href = Href('https://trac.edgewall.org')
    >>> href('ticket', 540)
    'https://trac.edgewall.org/ticket/540'
    In common usage, it may improve readability to use the function-calling
    ability for the first component of the URL as mentioned earlier:
    >>> href = Href('/trac')
    >>> href.ticket(540)
    '/trac/ticket/540'
    >>> href.browser('/trunk/README.txt', format='txt')
    '/trac/browser/trunk/README.txt?format=txt'
    The ``path_safe`` argument specifies the characters that don't
    need to be quoted in the path arguments. Likewise, the
    ``query_safe`` argument specifies the characters that don't need
    to be quoted in the query string:
    >>> href = Href('')
    >>> href.milestone('<look,here>', param='<here,too>')
    '/milestone/%3Clook%2Chere%3E?param=%3Chere%2Ctoo%3E'
    >>> href = Href('', path_safe='/<,', query_safe=',>')
    >>> href.milestone('<look,here>', param='<here,too>')
    '/milestone/<look,here%3E?param=%3Chere,too>'
    """
    def __init__(self, base, path_safe="/!~*'()", query_safe="!~*'()"):
        self.base = base.rstrip('/')
        self.path_safe = path_safe
        self.query_safe = query_safe
        # Cache for the callables created lazily by __getattr__()
        self._derived = {}
    def __call__(self, *args, **kw):
        href = self.base
        params = []
        def add_param(name, value):
            # A sequence value contributes one query argument per
            # non-None item (e.g. ?show=a&show=b)
            if isinstance(value, (list, tuple)):
                for i in [i for i in value if i is not None]:
                    params.append((name, i))
            elif value is not None:
                params.append((name, value))
        if args:
            lastp = args[-1]
            # A dict or list/tuple as last positional argument supplies
            # query-string parameters
            if isinstance(lastp, dict):
                for k, v in lastp.items():
                    add_param(k, v)
                args = args[:-1]
            elif isinstance(lastp, (list, tuple)):
                for k, v in lastp:
                    add_param(k, v)
                args = args[:-1]
        # build the path
        path = '/'.join(unicode_quote(unicode(arg).strip('/'), self.path_safe)
                        for arg in args if arg is not None)
        if path:
            href += '/' + slashes_re.sub('/', path).lstrip('/')
        elif not href:
            href = '/'
        # assemble the query string
        for k, v in kw.items():
            # A trailing '_' quotes Python keywords (from_, class_, ...)
            add_param(k[:-1] if k.endswith('_') else k, v)
        if params:
            href += '?' + unicode_urlencode(params, self.query_safe)
        return href
    def __getattr__(self, name):
        # Lazily create and memoize partial applications so that
        # href.ticket(...) is equivalent to href('ticket', ...)
        if name not in self._derived:
            self._derived[name] = lambda *args, **kw: self(name, *args, **kw)
        return self._derived[name]
    def __add__(self, rhs):
        # href + 'segment' appends a path string to the base URL
        if rhs.startswith('/'):
            return self.base + rhs
        if rhs:
            return self.base + '/' + rhs
        return self.base or '/'
if __name__ == '__main__':
    # Run the doctests embedded in the Href class docstring.
    import doctest, sys
    doctest.testmod(sys.modules[__name__])
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/modpython_frontend.py | trac/trac/web/modpython_frontend.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2009 Edgewall Software
# Copyright (C) 2004-2007 Christopher Lenz <cmlenz@gmx.de>
# Copyright (C) 2005 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
# Matthew Good <trac@matt-good.net>
from __future__ import with_statement
import os
import pkg_resources
import sys
import urllib
try:
import threading
except ImportError:
import dummy_threading as threading
from mod_python import apache
try:
from mod_python import version
except ImportError:
version = "< 3.2"
from trac import __version__ as VERSION
from trac.web.wsgi import WSGIGateway, _ErrorsWrapper
class InputWrapper(object):
    """File-like adapter exposing a mod_python request's input stream.

    Implements the subset of the file protocol that WSGI requires by
    delegating every call to the wrapped request object.
    """
    def __init__(self, req):
        self.req = req
    def close(self):
        """No-op: the request's lifetime is managed by mod_python."""
        pass
    def read(self, size=-1):
        """Read at most `size` bytes from the request body."""
        return self.req.read(size)
    def readline(self, size=-1):
        """Read a single line, at most `size` bytes long."""
        return self.req.readline(size)
    def readlines(self, hint=-1):
        """Read the remaining lines, with `hint` as a size hint."""
        return self.req.readlines(hint)
class ModPythonGateway(WSGIGateway):
    """WSGI gateway implemented on top of a mod_python request object."""
    # Reflect the Apache MPM capabilities in the WSGI environment flags
    wsgi_multithread = apache.mpm_query(apache.AP_MPMQ_IS_THREADED) > 0
    wsgi_multiprocess = apache.mpm_query(apache.AP_MPMQ_IS_FORKED) > 0
    def __init__(self, req, options):
        environ = {}
        environ.update(apache.build_cgi_env(req))
        environ['trac.web.frontend'] = 'mod_python'
        environ['trac.web.version'] = version
        # Copy the per-<Location> PythonOption settings into the WSGI
        # environment under the keys the Trac dispatcher expects.
        if 'TracEnv' in options:
            environ['trac.env_path'] = options['TracEnv']
        if 'TracEnvParentDir' in options:
            environ['trac.env_parent_dir'] = options['TracEnvParentDir']
        if 'TracEnvIndexTemplate' in options:
            environ['trac.env_index_template'] = options['TracEnvIndexTemplate']
        if 'TracTemplateVars' in options:
            environ['trac.template_vars'] = options['TracTemplateVars']
        if 'TracLocale' in options:
            environ['trac.locale'] = options['TracLocale']
        if 'TracUriRoot' in options:
            # Special handling of SCRIPT_NAME/PATH_INFO for mod_python, which
            # tends to get confused for whatever reason
            root_uri = options['TracUriRoot'].rstrip('/')
            request_uri = environ['REQUEST_URI'].split('?', 1)[0]
            if not request_uri.startswith(root_uri):
                raise ValueError('TracUriRoot set to %s but request URL '
                                 'is %s' % (root_uri, request_uri))
            environ['SCRIPT_NAME'] = root_uri
            environ['PATH_INFO'] = urllib.unquote(request_uri[len(root_uri):])
        WSGIGateway.__init__(self, environ, InputWrapper(req),
                             _ErrorsWrapper(lambda x: req.log_error(x)))
        self.req = req
    def _send_headers(self):
        """Push the buffered status and headers into the mod_python req."""
        assert self.headers_set, 'Response not started'
        if not self.headers_sent:
            status, headers = self.headers_sent = self.headers_set
            self.req.status = int(status[:3])
            for name, value in headers:
                # Content-Length and Content-Type have dedicated fields on
                # the mod_python request and must not go into headers_out.
                if name.lower() == 'content-length':
                    self.req.set_content_length(int(value))
                elif name.lower() == 'content-type':
                    self.req.content_type = value
                else:
                    self.req.headers_out.add(name, value)
    def _sendfile(self, fileobj):
        """Serve a file through mod_python's optimized sendfile."""
        self._send_headers()
        try:
            self.req.sendfile(fileobj.name)
        except IOError, e:
            # A client disconnecting mid-stream is not worth propagating
            if 'client closed connection' not in str(e):
                raise
    def _write(self, data):
        """Write one chunk of the response body."""
        self._send_headers()
        try:
            self.req.write(data)
        except IOError, e:
            if 'client closed connection' not in str(e):
                raise
# One-time initialization guard for handler(): the first request performs
# the egg-cache setup while holding _first_lock.
_first = True
_first_lock = threading.Lock()
def handler(req):
    """mod_python entry point: one-time egg-cache setup, then dispatch."""
    global _first
    with _first_lock:
        if _first:
            _first = False
            # Locate the Python egg cache directory, trying in order: the
            # PYTHON_EGG_CACHE option, the environment directory, its parent
            # directory, and finally the subprocess environment.
            options = req.get_options()
            egg_cache = options.get('PYTHON_EGG_CACHE')
            if not egg_cache and options.get('TracEnv'):
                egg_cache = os.path.join(options.get('TracEnv'), '.egg-cache')
            if not egg_cache and options.get('TracEnvParentDir'):
                egg_cache = os.path.join(options.get('TracEnvParentDir'), '.egg-cache')
            if not egg_cache and req.subprocess_env.get('PYTHON_EGG_CACHE'):
                egg_cache = req.subprocess_env.get('PYTHON_EGG_CACHE')
            if egg_cache:
                pkg_resources.set_extraction_path(egg_cache)
            # Reload trac.web so the extraction path takes effect, and make
            # sure the matching Trac version is on the path.
            reload(sys.modules['trac.web'])
            pkg_resources.require('Trac==%s' % VERSION)
    gateway = ModPythonGateway(req, req.get_options())
    from trac.web.main import dispatch_request
    gateway.run(dispatch_request)
    return apache.OK
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/auth.py | trac/trac/web/auth.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
from __future__ import with_statement
try:
from base64 import b64decode, b64encode
except ImportError:
from base64 import decodestring as b64decode
from base64 import encodestring as b64encode
from hashlib import md5, sha1
import os
import re
import sys
import time
import urllib2
import urlparse
from genshi.builder import tag
from trac.config import BoolOption, IntOption, Option
from trac.core import *
from trac.web.api import IAuthenticator, IRequestHandler
from trac.web.chrome import INavigationContributor
from trac.util import hex_entropy, md5crypt
from trac.util.concurrency import threading
from trac.util.translation import _, tag_
class LoginModule(Component):
"""User authentication manager.
This component implements user authentication based on HTTP
authentication provided by the web-server, combined with cookies
for communicating the login information across the whole site.
This mechanism expects that the web-server is setup so that a
request to the path '/login' requires authentication (such as
Basic or Digest). The login name is then stored in the database
and associated with a unique key that gets passed back to the user
agent using the 'trac_auth' cookie. This cookie is used to
identify the user in subsequent requests to non-protected
resources.
"""
implements(IAuthenticator, INavigationContributor, IRequestHandler)
check_ip = BoolOption('trac', 'check_auth_ip', 'false',
"""Whether the IP address of the user should be checked for
authentication (''since 0.9'').""")
ignore_case = BoolOption('trac', 'ignore_auth_case', 'false',
"""Whether login names should be converted to lower case
(''since 0.9'').""")
auth_cookie_lifetime = IntOption('trac', 'auth_cookie_lifetime', 0,
"""Lifetime of the authentication cookie, in seconds.
This value determines how long the browser will cache
authentication information, and therefore, after how much
inactivity a user will have to log in again. The default value
of 0 makes the cookie expire at the end of the browsing
session. (''since 0.12'')""")
auth_cookie_path = Option('trac', 'auth_cookie_path', '',
"""Path for the authentication cookie. Set this to the common
base path of several Trac instances if you want them to share
the cookie. (''since 0.12'')""")
# IAuthenticator methods
def authenticate(self, req):
authname = None
if req.remote_user:
authname = req.remote_user
elif req.incookie.has_key('trac_auth'):
authname = self._get_name_for_cookie(req,
req.incookie['trac_auth'])
if not authname:
return None
if self.ignore_case:
authname = authname.lower()
return authname
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'login'
def get_navigation_items(self, req):
if req.authname and req.authname != 'anonymous':
yield ('metanav', 'login', _('logged in as %(user)s',
user=req.authname))
yield ('metanav', 'logout',
tag.a(_('Logout'), href=req.href.logout()))
else:
yield ('metanav', 'login',
tag.a(_('Login'), href=req.href.login()))
# IRequestHandler methods
def match_request(self, req):
return re.match('/(login|logout)/?$', req.path_info)
def process_request(self, req):
if req.path_info.startswith('/login'):
self._do_login(req)
elif req.path_info.startswith('/logout'):
self._do_logout(req)
self._redirect_back(req)
# Internal methods
    def _do_login(self, req):
        """Log the remote user in.
        This function expects to be called when the remote user name
        is available. The user name is inserted into the `auth_cookie`
        table and a cookie identifying the user on subsequent requests
        is sent back to the client.
        If the Authenticator was created with `ignore_case` set to
        true, then the authentication name passed from the web server
        in req.remote_user will be converted to lower case before
        being used. This is to avoid problems on installations
        authenticating against Windows which is not case sensitive
        regarding user names and domain names
        """
        if not req.remote_user:
            # TRANSLATOR: ... refer to the 'installation documentation'. (link)
            inst_doc = tag.a(_('installation documentation'),
                             title=_("Configuring Authentication"),
                             href=req.href.wiki('TracInstall')
                                  + "#ConfiguringAuthentication")
            raise TracError(tag_("Authentication information not available. "
                                 "Please refer to the %(inst_doc)s.",
                                 inst_doc=inst_doc))
        remote_user = req.remote_user
        if self.ignore_case:
            remote_user = remote_user.lower()
        assert req.authname in ('anonymous', remote_user), \
               _('Already logged in as %(user)s.', user=req.authname)
        with self.env.db_transaction as db:
            # Delete cookies older than 10 days
            db("DELETE FROM auth_cookie WHERE time < %s",
               (int(time.time()) - 86400 * 10,))
            # Insert a new cookie if we haven't already got one
            cookie = None
            trac_auth = req.incookie.get('trac_auth')
            if trac_auth is not None:
                # Reuse the existing cookie only when it already maps to
                # this very user
                name = self._cookie_to_name(req, trac_auth)
                cookie = trac_auth.value if name == remote_user else None
            if cookie is None:
                cookie = hex_entropy()
                db("""
                    INSERT INTO auth_cookie (cookie, name, ipnr, time)
                    VALUES (%s, %s, %s, %s)
                   """, (cookie, remote_user, req.remote_addr,
                         int(time.time())))
        req.authname = remote_user
        req.outcookie['trac_auth'] = cookie
        req.outcookie['trac_auth']['path'] = self.auth_cookie_path \
                                             or req.base_path or '/'
        if self.env.secure_cookies:
            req.outcookie['trac_auth']['secure'] = True
        if sys.version_info >= (2, 6):
            # 'httponly' is only supported by the Cookie module in 2.6+
            req.outcookie['trac_auth']['httponly'] = True
        if self.auth_cookie_lifetime > 0:
            req.outcookie['trac_auth']['expires'] = self.auth_cookie_lifetime
def _do_logout(self, req):
"""Log the user out.
Simply deletes the corresponding record from the auth_cookie
table.
"""
if req.authname == 'anonymous':
# Not logged in
return
if 'trac_auth' in req.incookie:
self.env.db_transaction("DELETE FROM auth_cookie WHERE cookie=%s",
(req.incookie['trac_auth'].value,))
else:
self.env.db_transaction("DELETE FROM auth_cookie WHERE name=%s",
(req.authname,))
self._expire_cookie(req)
custom_redirect = self.config['metanav'].get('logout.redirect')
if custom_redirect:
if not re.match(r'https?:|/', custom_redirect):
custom_redirect = req.href(custom_redirect)
req.redirect(custom_redirect)
def _expire_cookie(self, req):
"""Instruct the user agent to drop the auth cookie by setting
the "expires" property to a date in the past.
"""
req.outcookie['trac_auth'] = ''
req.outcookie['trac_auth']['path'] = self.auth_cookie_path \
or req.base_path or '/'
req.outcookie['trac_auth']['expires'] = -10000
if self.env.secure_cookies:
req.outcookie['trac_auth']['secure'] = True
if sys.version_info >= (2, 6):
req.outcookie['trac_auth']['httponly'] = True
def _cookie_to_name(self, req, cookie):
# This is separated from _get_name_for_cookie(), because the
# latter is overridden in AccountManager.
if self.check_ip:
sql = "SELECT name FROM auth_cookie WHERE cookie=%s AND ipnr=%s"
args = (cookie.value, req.remote_addr)
else:
sql = "SELECT name FROM auth_cookie WHERE cookie=%s"
args = (cookie.value,)
for name, in self.env.db_query(sql, args):
return name
def _get_name_for_cookie(self, req, cookie):
name = self._cookie_to_name(req, cookie)
if name is None:
# The cookie is invalid (or has been purged from the
# database), so tell the user agent to drop it as it is
# invalid
self._expire_cookie(req)
return name
    def _redirect_back(self, req):
        """Redirect the user back to the URL she came from.

        The referer (explicit argument or HTTP header) is only honoured
        when it points to the same site, compared with the scheme
        stripped so that http/https variants of the same base URL are
        treated as equal.  Falls back to the environment's root URL.
        """
        referer = self._referer(req)
        if referer:
            if not referer.startswith(('http://', 'https://')):
                # Make URL absolute
                scheme, host = urlparse.urlparse(req.base_url)[:2]
                referer = urlparse.urlunparse((scheme, host, referer, None,
                                               None, None))
            # Split base URL and referer at the first ':' so the
            # comparison below ignores the scheme.
            pos = req.base_url.find(':')
            base_scheme = req.base_url[:pos]
            base_noscheme = req.base_url[pos:]
            base_noscheme_norm = base_noscheme.rstrip('/')
            referer_noscheme = referer[referer.find(':'):]
            # only redirect to referer if it is from the same site
            if referer_noscheme == base_noscheme or \
                    referer_noscheme.startswith(base_noscheme_norm + '/'):
                # avoid redirect loops: don't redirect back to the very
                # page currently being requested
                if referer_noscheme.rstrip('/') != \
                        base_noscheme_norm + req.path_info.rstrip('/'):
                    req.redirect(base_scheme + referer_noscheme)
        req.redirect(req.abs_href())
def _referer(self, req):
return req.args.get('referer') or req.get_header('Referer')
class HTTPAuthentication(object):
    """Abstract interface for HTTP-level authentication schemes."""

    def do_auth(self, environ, start_response):
        # Subclasses authenticate the request: they either return the
        # authenticated user name or send a challenge response.
        raise NotImplementedError
class PasswordFileAuthentication(HTTPAuthentication):
    """Base class for authenticators backed by an on-disk password file.

    The file is parsed once at construction time via the subclass's
    `load()` and transparently re-read when its modification time
    increases.
    """

    def __init__(self, filename):
        # `load` is provided by the concrete subclass
        self.filename = filename
        self.mtime = os.stat(filename).st_mtime
        self.load(self.filename)
        # Serializes concurrent reload checks from handler threads
        self._lock = threading.Lock()

    def check_reload(self):
        """Re-parse the password file if it changed since the last read."""
        with self._lock:
            mtime = os.stat(self.filename).st_mtime
            if mtime > self.mtime:
                self.mtime = mtime
                self.load(self.filename)
class BasicAuthentication(PasswordFileAuthentication):
    """HTTP Basic authentication (:rfc:`2617`) against an Apache
    htpasswd file.

    Supported password hashes: SHA-1 (``{SHA}`` prefix), the ``$...$``
    schemes handled by `md5crypt`, and traditional crypt(3) hashes when
    a `crypt` implementation is available.
    """

    def __init__(self, htpasswd, realm):
        # FIXME pass a logger
        self.realm = realm
        try:
            import crypt
            self.crypt = crypt.crypt
        except ImportError:
            try:
                import fcrypt
                self.crypt = fcrypt.crypt
            except ImportError:
                # crypt(3)-style hashes can't be verified without one of
                # the two modules above; load() warns about such entries
                self.crypt = None
        PasswordFileAuthentication.__init__(self, htpasswd)

    def load(self, filename):
        """(Re-)read the user/hash pairs from the htpasswd file."""
        # FIXME use a logger
        self.hash = {}
        fd = open(filename, 'r')
        try:
            for line in fd:
                line = line.strip()
                if not line:
                    continue
                try:
                    u, h = line.split(':')
                except ValueError:
                    print>>sys.stderr, 'Warning: invalid password line in %s: ' \
                                       '%s' % (filename, line)
                    continue
                if '$' in h or h.startswith('{SHA}') or self.crypt:
                    self.hash[u] = h
                else:
                    print>>sys.stderr, 'Warning: cannot parse password for ' \
                                       'user "%s" without the "crypt" module' % u
        finally:
            fd.close()  # don't leak the file handle on reloads
        if self.hash == {}:
            print>>sys.stderr, "Warning: found no users in file:", filename

    def test(self, user, password):
        """Return True iff `password` matches the stored hash for `user`."""
        self.check_reload()
        the_hash = self.hash.get(user)
        if the_hash is None:
            return False
        if the_hash.startswith('{SHA}'):
            return b64encode(sha1(password).digest()) == the_hash[5:]
        if not '$' in the_hash:
            # Traditional crypt(3): the first two characters are the salt
            return self.crypt(password, the_hash[:2]) == the_hash
        magic, salt = the_hash[1:].split('$')[:2]
        magic = '$' + magic + '$'
        return md5crypt(password, salt, magic) == the_hash

    def do_auth(self, environ, start_response):
        """Validate an ``Authorization: Basic`` header, or challenge.

        Returns the authenticated user name, or sends a 401 with a
        Basic challenge and returns None.
        """
        header = environ.get('HTTP_AUTHORIZATION')
        if header and header.startswith('Basic'):
            # Split on the *first* colon only: RFC 2617 forbids ':' in
            # the userid but allows it in the password, so a plain
            # split(':') would reject valid credentials.
            auth = b64decode(header[6:]).split(':', 1)
            if len(auth) == 2:
                user, password = auth
                if self.test(user, password):
                    return user
        start_response('401 Unauthorized',
                       [('WWW-Authenticate', 'Basic realm="%s"' % self.realm),
                        ('Content-Length', '0')])('')
class DigestAuthentication(PasswordFileAuthentication):
    """A simple HTTP digest authentication implementation
    (:rfc:`2617`)."""

    #: Upper bound on outstanding nonces kept for replay detection;
    #: the oldest ones are dropped first.
    MAX_NONCES = 100

    def __init__(self, htdigest, realm):
        # FIXME pass a logger
        self.active_nonces = []
        self.realm = realm
        PasswordFileAuthentication.__init__(self, htdigest)

    def load(self, filename):
        """Load account information from apache style htdigest files,
        only users from the specified realm are used
        """
        # FIXME use a logger
        self.hash = {}
        fd = open(filename, 'r')
        try:
            for line in fd:
                line = line.strip()
                if not line:
                    continue
                try:
                    u, r, a1 = line.split(':')
                except ValueError:
                    print>>sys.stderr, 'Warning: invalid digest line in %s: %s' \
                                       % (filename, line)
                    continue
                if r == self.realm:
                    self.hash[u] = a1
        finally:
            fd.close()  # don't leak the file handle on reloads
        if self.hash == {}:
            print>>sys.stderr, "Warning: found no users in realm:", self.realm

    def parse_auth_header(self, authorization):
        """Parse the comma-separated key=value pairs of a Digest
        Authorization header into a dict (quotes stripped)."""
        values = {}
        for value in urllib2.parse_http_list(authorization):
            n, v = value.split('=', 1)
            if v[0] == '"' and v[-1] == '"':
                values[n] = v[1:-1]
            else:
                values[n] = v
        return values

    def send_auth_request(self, environ, start_response, stale='false'):
        """Send a digest challenge to the browser. Record used nonces
        to avoid replay attacks.
        """
        nonce = hex_entropy()
        self.active_nonces.append(nonce)
        if len(self.active_nonces) > self.MAX_NONCES:
            self.active_nonces = self.active_nonces[-self.MAX_NONCES:]
        start_response('401 Unauthorized',
                       [('WWW-Authenticate',
                         'Digest realm="%s", nonce="%s", qop="auth", stale="%s"'
                         % (self.realm, nonce, stale)),
                        ('Content-Length', '0')])('')

    def do_auth(self, environ, start_response):
        """Validate an ``Authorization: Digest`` header, or challenge.

        Returns the authenticated user name, or sends a 401 challenge
        and returns None.
        """
        header = environ.get('HTTP_AUTHORIZATION')
        if not header or not header.startswith('Digest'):
            self.send_auth_request(environ, start_response)
            return None

        auth = self.parse_auth_header(header[7:])
        # 'qop' is required too: it is used in the response computation
        # below, and a client omitting it used to trigger a KeyError
        # instead of a clean re-challenge.
        required_keys = ['username', 'realm', 'nonce', 'uri', 'response',
                         'nc', 'cnonce', 'qop']
        # Invalid response?
        for key in required_keys:
            if key not in auth:
                self.send_auth_request(environ, start_response)
                return None

        # Unknown user?
        self.check_reload()
        if auth['username'] not in self.hash:
            self.send_auth_request(environ, start_response)
            return None

        kd = lambda x: md5(':'.join(x)).hexdigest()
        a1 = self.hash[auth['username']]
        a2 = kd([environ['REQUEST_METHOD'], auth['uri']])

        # Is the response correct?
        correct = kd([a1, auth['nonce'], auth['nc'],
                      auth['cnonce'], auth['qop'], a2])
        if auth['response'] != correct:
            self.send_auth_request(environ, start_response)
            return None
        # Is the nonce active, if not ask the client to use a new one
        if not auth['nonce'] in self.active_nonces:
            self.send_auth_request(environ, start_response, stale='true')
            return None
        self.active_nonces.remove(auth['nonce'])
        return auth['username']
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/wsgi.py | trac/trac/web/wsgi.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
import errno
import socket
import sys
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ForkingMixIn, ThreadingMixIn
import urllib
class _ErrorsWrapper(object):
def __init__(self, logfunc):
self.logfunc = logfunc
def flush(self):
pass
def write(self, msg):
self.logfunc(msg)
def writelines(self, seq):
map(self.write, seq)
class _FileWrapper(object):
"""Wrapper for sending a file as response."""
def __init__(self, fileobj, blocksize=None):
self.fileobj = fileobj
self.blocksize = blocksize
self.read = self.fileobj.read
if hasattr(fileobj, 'close'):
self.close = fileobj.close
def __iter__(self):
return self
def next(self):
data = self.fileobj.read(self.blocksize)
if not data:
raise StopIteration
return data
class WSGIGateway(object):
    """Abstract base class for WSGI servers or gateways.

    Populates the ``wsgi.*`` keys of the environ, drives the
    application, and leaves the actual byte transport to the
    subclass-provided `_write` (and optional `_sendfile`).
    """

    # Values advertised in the wsgi.* environ keys; subclasses may
    # override them to describe their execution model.
    wsgi_version = (1, 0)
    wsgi_multithread = True
    wsgi_multiprocess = True
    wsgi_run_once = False
    wsgi_file_wrapper = _FileWrapper

    def __init__(self, environ, stdin=sys.stdin, stderr=sys.stderr):
        """Initialize the gateway object.

        Fills in the required ``wsgi.*`` environ keys, deriving
        ``wsgi.url_scheme`` from the ``HTTPS`` variable or the
        ``X-Forwarded-Proto`` header (reverse-proxy setups).
        """
        environ['wsgi.version'] = self.wsgi_version
        environ['wsgi.url_scheme'] = 'http'
        if environ.get('HTTPS', '').lower() in ('yes', 'on', '1'):
            environ['wsgi.url_scheme'] = 'https'
        elif environ.get('HTTP_X_FORWARDED_PROTO', '').lower() == 'https':
            environ['wsgi.url_scheme'] = 'https'
        environ['wsgi.input'] = stdin
        environ['wsgi.errors'] = stderr
        environ['wsgi.multithread'] = self.wsgi_multithread
        environ['wsgi.multiprocess'] = self.wsgi_multiprocess
        environ['wsgi.run_once'] = self.wsgi_run_once
        if self.wsgi_file_wrapper is not None:
            environ['wsgi.file_wrapper'] = self.wsgi_file_wrapper
        self.environ = environ

        # [status, headers] once set; copied to headers_sent when the
        # first body chunk goes out.
        self.headers_set = []
        self.headers_sent = []

    def run(self, application):
        """Start the gateway with the given WSGI application."""
        response = application(self.environ, self._start_response)
        try:
            if self.wsgi_file_wrapper is not None \
                    and isinstance(response, self.wsgi_file_wrapper) \
                    and hasattr(self, '_sendfile'):
                # Platform-optimized file transmission when available
                self._sendfile(response.fileobj)
            else:
                for chunk in response:
                    if chunk:
                        self._write(chunk)
                if not self.headers_sent:
                    # Force the headers out even for an empty body
                    self._write('')
        finally:
            # Per the WSGI spec, always close the response if possible
            if hasattr(response, 'close'):
                response.close()

    def _start_response(self, status, headers, exc_info=None):
        """Callback for starting a HTTP response.

        Follows the PEP 333 protocol: may only be called a second time
        with `exc_info`, and re-raises once headers are already sent.
        """
        if exc_info:
            try:
                if self.headers_sent: # Re-raise original exception
                    raise exc_info[0], exc_info[1], exc_info[2]
            finally:
                exc_info = None # avoid dangling circular ref
        else:
            assert not self.headers_set, 'Response already started'
        self.headers_set = [status, headers]
        return self._write

    def _write(self, data):
        """Callback for writing data to the response.

        Concrete subclasses must implement this method."""
        raise NotImplementedError
class WSGIRequestHandler(BaseHTTPRequestHandler):
    """Request handler that translates an HTTP request into a WSGI
    environ and hands it to the server's gateway."""

    def setup_environ(self):
        """Parse the request line and headers into a WSGI environ dict.

        Returns None (and arranges for the connection to close) on
        disconnect, empty request or parse failure.
        """
        self.raw_requestline = self.rfile.readline()
        if (self.rfile.closed or             # disconnect
                not self.raw_requestline or  # empty request
                not self.parse_request()):   # invalid request
            self.close_connection = 1
            # note that in the latter case, an error code has already been sent
            return

        # Start from the per-server base environ (SERVER_NAME etc.)
        environ = self.server.environ.copy()
        environ['SERVER_PROTOCOL'] = self.request_version
        environ['REQUEST_METHOD'] = self.command

        if '?' in self.path:
            path_info, query_string = self.path.split('?', 1)
        else:
            path_info, query_string = self.path, ''
        environ['PATH_INFO'] = urllib.unquote(path_info)
        environ['QUERY_STRING'] = query_string

        host = self.address_string()
        if host != self.client_address[0]:
            environ['REMOTE_HOST'] = host
        environ['REMOTE_ADDR'] = self.client_address[0]

        if self.headers.typeheader is None:
            environ['CONTENT_TYPE'] = self.headers.type
        else:
            environ['CONTENT_TYPE'] = self.headers.typeheader

        length = self.headers.getheader('content-length')
        if length:
            environ['CONTENT_LENGTH'] = length

        # Remaining headers become HTTP_* variables; repeated headers
        # are joined with commas as allowed by RFC 2616.
        for name, value in [header.split(':', 1) for header
                            in self.headers.headers]:
            name = name.replace('-', '_').upper()
            value = value.strip()
            if name in environ:
                # skip content length, type, etc.
                continue
            if 'HTTP_' + name in environ:
                # comma-separate multiple headers
                environ['HTTP_' + name] += ',' + value
            else:
                environ['HTTP_' + name] = value

        return environ

    def handle_one_request(self):
        """Serve a single request through the server's WSGI gateway."""
        try:
            environ = self.setup_environ()
        except (IOError, socket.error), e:
            environ = None
            # 10053/10054 are the Windows equivalents of EPIPE/ECONNRESET
            if e.args[0] in (errno.EPIPE, errno.ECONNRESET, 10053, 10054):
                # client disconnect
                self.close_connection = 1
            else:
                raise
        if environ:
            gateway = self.server.gateway(self, environ)
            gateway.run(self.server.application)
        # else we had no request or a bad request: we simply exit (#3043)

    def finish(self):
        """We need to help the garbage collector a little."""
        BaseHTTPRequestHandler.finish(self)
        self.wfile = None
        self.rfile = None
class WSGIServerGateway(WSGIGateway):
    """Gateway writing responses directly through a request handler's
    socket file, with errors routed to the handler's log."""

    def __init__(self, handler, environ):
        WSGIGateway.__init__(self, environ, handler.rfile,
                             _ErrorsWrapper(lambda x: handler.log_error('%s', x)))
        self.handler = handler

    def _write(self, data):
        """Send headers (on first call) and a body chunk to the client.

        Client disconnects are swallowed by flagging the connection for
        closing instead of propagating the socket error.
        """
        assert self.headers_set, 'Response not started'

        if self.handler.wfile.closed:
            return # don't write to an already closed file (fix for #1183)

        try:
            if not self.headers_sent:
                # Flush the status line and headers before the first
                # body chunk
                status, headers = self.headers_sent = self.headers_set
                self.handler.send_response(int(status[:3]))
                for name, value in headers:
                    self.handler.send_header(name, value)
                self.handler.end_headers()
            self.handler.wfile.write(data)
        except (IOError, socket.error), e:
            # 10053/10054 are the Windows equivalents of EPIPE/ECONNRESET
            if e.args[0] in (errno.EPIPE, errno.ECONNRESET, 10053, 10054):
                # client disconnect
                self.handler.close_connection = 1
            else:
                raise
class WSGIServer(HTTPServer):
    """HTTPServer subclass that serves a WSGI application through a
    configurable gateway and request handler."""

    def __init__(self, server_address, application, gateway=WSGIServerGateway,
                 request_handler=WSGIRequestHandler):
        # Bind the socket first: HTTPServer.__init__ also resolves
        # server_name/server_port, which the base environ below needs.
        HTTPServer.__init__(self, server_address, request_handler)

        self.application = application
        self.gateway = gateway
        # Advertise our concurrency model through the gateway class
        gateway.wsgi_multithread = isinstance(self, ThreadingMixIn)
        gateway.wsgi_multiprocess = isinstance(self, ForkingMixIn)

        self.environ = {'SERVER_NAME': self.server_name,
                        'SERVER_PORT': str(self.server_port),
                        'SCRIPT_NAME': ''}
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/wikisyntax.py | trac/trac/web/tests/wikisyntax.py | import unittest
from trac.wiki.tests import formatter

# Wiki-formatter round-trip cases for the htdocs: link resolver: each
# section pairs wiki input with the expected HTML rendering (consumed
# by trac.wiki.tests.formatter.suite below).
TEST_CASES = """
============================== htdocs: links resolver
htdocs:release-1.0.tar.gz
[htdocs:release-1.0.tar.gz Release 1.0]
------------------------------
<p>
<a href="/chrome/site/release-1.0.tar.gz">htdocs:release-1.0.tar.gz</a>
</p>
<p>
<a href="/chrome/site/release-1.0.tar.gz">Release 1.0</a>
</p>
------------------------------
"""
def suite():
    """Build the formatter-driven test suite from TEST_CASES."""
    cases = TEST_CASES
    return formatter.suite(cases, file=__file__)
if __name__ == '__main__':
    # Allow running this test module standalone.
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/api.py | trac/trac/web/tests/api.py | # -*- coding: utf-8 -*-
from trac.test import Mock
from trac.web.api import Request, RequestDone, parse_arg_list
from StringIO import StringIO
import unittest
class RequestTestCase(unittest.TestCase):
    """Unit tests for `trac.web.api.Request` URL reconstruction,
    cookies, body reading and argument parsing."""

    def _make_environ(self, scheme='http', server_name='example.org',
                      server_port=80, method='GET', script_name='/trac',
                      **kwargs):
        """Build a minimal WSGI environ; extra kwargs override keys."""
        environ = {'wsgi.url_scheme': scheme, 'wsgi.input': StringIO(''),
                   'REQUEST_METHOD': method, 'SERVER_NAME': server_name,
                   'SERVER_PORT': server_port, 'SCRIPT_NAME': script_name}
        environ.update(kwargs)
        return environ

    def test_base_url(self):
        environ = self._make_environ()
        req = Request(environ, None)
        self.assertEqual('http://example.org/trac', req.base_url)

    def test_base_url_host(self):
        # The Host header takes precedence over SERVER_NAME/PORT
        environ = self._make_environ(server_port=8080, HTTP_HOST='example.com')
        req = Request(environ, None)
        self.assertEqual('http://example.com/trac', req.base_url)

    def test_base_url_nondefaultport(self):
        environ = self._make_environ(server_port=8080)
        req = Request(environ, None)
        self.assertEqual('http://example.org:8080/trac', req.base_url)

    def test_base_url_https(self):
        # Default https port (443) must not appear in the URL
        environ = self._make_environ(scheme='https', server_port=443)
        req = Request(environ, None)
        self.assertEqual('https://example.org/trac', req.base_url)

    def test_base_url_https_host(self):
        environ = self._make_environ(scheme='https', server_port=443,
                                     HTTP_HOST='example.com')
        req = Request(environ, None)
        self.assertEqual('https://example.com/trac', req.base_url)

    def test_base_url_https_nondefaultport(self):
        environ = self._make_environ(scheme='https', server_port=8443)
        req = Request(environ, None)
        self.assertEqual('https://example.org:8443/trac', req.base_url)

    def test_base_url_proxy(self):
        # X-Forwarded-Host is ignored; the Host header wins
        environ = self._make_environ(HTTP_HOST='localhost',
                                     HTTP_X_FORWARDED_HOST='example.com')
        req = Request(environ, None)
        self.assertEqual('http://localhost/trac', req.base_url)

    def test_languages(self):
        # Accept-Language entries are ordered, quality values stripped
        environ = self._make_environ()
        environ['HTTP_ACCEPT_LANGUAGE'] = 'en-us,en;q=0.5'
        req = Request(environ, None)
        self.assertEqual(['en-us', 'en'], req.languages)

    def test_redirect(self):
        status_sent = []
        headers_sent = {}
        def start_response(status, headers):
            status_sent.append(status)
            headers_sent.update(dict(headers))
        environ = self._make_environ(method='HEAD')
        req = Request(environ, start_response)
        req.session = Mock(save=lambda: None)
        # redirect() must terminate processing by raising RequestDone
        self.assertRaises(RequestDone, req.redirect, '/trac/test')
        self.assertEqual('302 Found', status_sent[0])
        self.assertEqual('http://example.org/trac/test',
                         headers_sent['Location'])

    def test_redirect_absolute(self):
        status_sent = []
        headers_sent = {}
        def start_response(status, headers):
            status_sent.append(status)
            headers_sent.update(dict(headers))
        environ = self._make_environ(method='HEAD')
        req = Request(environ, start_response,)
        req.session = Mock(save=lambda: None)
        # Absolute targets are passed through unmodified
        self.assertRaises(RequestDone, req.redirect,
                          'http://example.com/trac/test')
        self.assertEqual('302 Found', status_sent[0])
        self.assertEqual('http://example.com/trac/test',
                         headers_sent['Location'])

    def test_write_unicode(self):
        buf = StringIO()
        def write(data):
            buf.write(data)
        def start_response(status, headers):
            return write
        environ = self._make_environ(method='HEAD')
        req = Request(environ, start_response)
        req.send_header('Content-Type', 'text/plain;charset=utf-8')
        # we didn't set Content-Length, so we get a RuntimeError for that
        self.assertRaises(RuntimeError, req.write, u'Föö')
        req = Request(environ, start_response)
        req.send_header('Content-Type', 'text/plain;charset=utf-8')
        req.send_header('Content-Length', 0)
        # anyway we're not supposed to send unicode, so we get a ValueError
        self.assertRaises(ValueError, req.write, u'Föö')

    def test_invalid_cookies(self):
        # Malformed cookie headers are silently discarded
        environ = self._make_environ(HTTP_COOKIE='bad:key=value;')
        req = Request(environ, None)
        self.assertEqual('', str(req.incookie))

    def test_multiple_cookies(self):
        # Only the first value of a repeated cookie name is kept
        environ = self._make_environ(HTTP_COOKIE='key=value1; key=value2;')
        req = Request(environ, None)
        self.assertEqual('Set-Cookie: key=value1',
                         str(req.incookie).rstrip(';'))

    def test_read(self):
        environ = self._make_environ(**{'wsgi.input': StringIO('test input')})
        req = Request(environ, None)
        self.assertEqual('test input', req.read())

    def test_read_size(self):
        environ = self._make_environ(**{'wsgi.input': StringIO('test input')})
        req = Request(environ, None)
        self.assertEqual('test', req.read(size=4))

    def test_qs_on_post(self):
        """Make sure req.args parsing is consistent even after the backwards
        incompatible change introduced in Python 2.6.
        """
        environ = self._make_environ(method='GET',
                                     **{'QUERY_STRING': 'action=foo'})
        req = Request(environ, None)
        self.assertEqual('foo', req.args['action'])
        # For POST requests, the body takes precedence over the query string
        environ = self._make_environ(method='POST',
                                     **{'wsgi.input': StringIO('action=bar'),
                                        'CONTENT_LENGTH': '10',
                                        'CONTENT_TYPE': 'application/x-www-form-urlencoded',
                                        'QUERY_STRING': 'action=foo'})
        req = Request(environ, None)
        self.assertEqual('bar', req.args['action'])
class ParseArgListTestCase(unittest.TestCase):
    """Unit tests for `trac.web.api.parse_arg_list`."""

    # Note: the original checks used `assertTrue(unicode, type(x))`,
    # which treats the second argument as a *message* and therefore
    # always passed; the type comparisons below are made explicit.

    def test_qs_str(self):
        """Percent-encoded UTF-8 byte strings decode to unicode pairs."""
        args = parse_arg_list('k%C3%A9y=resum%C3%A9&r%C3%A9sum%C3%A9')
        self.assertEqual(unicode, type(args[0][0]))
        self.assertEqual(unicode, type(args[0][1]))
        self.assertEqual(u'kéy', args[0][0])
        self.assertEqual(u'resumé', args[0][1])
        self.assertEqual(unicode, type(args[1][0]))
        self.assertEqual(u'résumé', args[1][0])

    def test_qs_unicode(self):
        """Encoded '=' and '&' inside names and values are preserved."""
        args = parse_arg_list(u'ké%3Dy=re%26su=mé&résu%26mé')
        self.assertEqual(unicode, type(args[0][0]))
        self.assertEqual(unicode, type(args[0][1]))
        self.assertEqual(u'ké=y', args[0][0])
        self.assertEqual(u're&su=mé', args[0][1])
        self.assertEqual(unicode, type(args[1][0]))
        self.assertEqual(u'résu&mé', args[1][0])
def suite():
    """Aggregate all test cases defined in this module."""
    all_tests = unittest.TestSuite()
    for case in (RequestTestCase, ParseArgListTestCase):
        all_tests.addTest(unittest.makeSuite(case, 'test'))
    return all_tests
if __name__ == '__main__':
    # Allow running this test module standalone.
    unittest.main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/main.py | trac/trac/web/tests/main.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.util import create_file
from trac.web.main import get_environments
import tempfile
import unittest
import os.path
class EnvironmentsTestCase(unittest.TestCase):
    """Tests for `trac.web.main.get_environments` project discovery,
    including `.tracignore` pattern handling.

    Uses `assertEqual` throughout; `assertEquals` is a deprecated alias.
    """

    dirs = ('mydir1', 'mydir2', '.hidden_dir')
    files = ('myfile1', 'myfile2', '.dot_file')

    def setUp(self):
        """Create a parent directory holding dummy projects and files."""
        self.parent_dir = tempfile.mkdtemp(prefix='trac-')
        self.tracignore = os.path.join(self.parent_dir, '.tracignore')
        for dname in self.dirs:
            os.mkdir(os.path.join(self.parent_dir, dname))
        for fname in self.files:
            create_file(os.path.join(self.parent_dir, fname))
        self.environ = {
           'trac.env_paths': [],
           'trac.env_parent_dir': self.parent_dir,
        }

    def tearDown(self):
        """Remove everything created by setUp (and any .tracignore)."""
        for fname in self.files:
            os.unlink(os.path.join(self.parent_dir, fname))
        for dname in self.dirs:
            os.rmdir(os.path.join(self.parent_dir, dname))
        if os.path.exists(self.tracignore):
            os.unlink(self.tracignore)
        os.rmdir(self.parent_dir)

    def env_paths(self, projects):
        """Map project names to their normalized absolute paths."""
        return dict((project, os.path.normpath(os.path.join(self.parent_dir,
                                                            project)))
                    for project in projects)

    def test_default_tracignore(self):
        # Without a .tracignore, hidden directories are skipped
        self.assertEqual(self.env_paths(['mydir1', 'mydir2']),
                         get_environments(self.environ))

    def test_empty_tracignore(self):
        # An empty .tracignore disables the default hidden-dir filter
        create_file(self.tracignore)
        self.assertEqual(self.env_paths(['mydir1', 'mydir2', '.hidden_dir']),
                         get_environments(self.environ))

    def test_qmark_pattern_tracignore(self):
        create_file(self.tracignore, 'mydir?')
        self.assertEqual(self.env_paths(['.hidden_dir']),
                         get_environments(self.environ))

    def test_star_pattern_tracignore(self):
        create_file(self.tracignore, 'my*\n.hidden_dir')
        self.assertEqual({}, get_environments(self.environ))

    def test_combined_tracignore(self):
        # Blank lines and '#' comment lines in .tracignore are ignored
        create_file(self.tracignore, 'my*i?1\n\n#mydir2')
        self.assertEqual(self.env_paths(['mydir2', '.hidden_dir']),
                         get_environments(self.environ))
def suite():
    """Return the test suite for environment discovery."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(EnvironmentsTestCase, 'test'))
    return tests
if __name__ == '__main__':
    # Allow running this test module standalone.
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/chrome.py | trac/trac/web/tests/chrome.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import Component, implements
from trac.test import EnvironmentStub
from trac.tests.contentgen import random_sentence
from trac.web.chrome import (
Chrome, INavigationContributor, add_link, add_meta, add_notice, add_script,
add_script_data, add_stylesheet, add_warning)
from trac.web.href import Href
import unittest
class Request(object):
    """Minimal stand-in for `trac.web.api.Request` used by these tests.

    Arbitrary keyword arguments become instance attributes; `chrome`
    always starts out as an empty dict and `locale` defaults to None.
    """
    locale = None

    def __init__(self, **kwargs):
        self.chrome = {}
        for name in kwargs:
            setattr(self, name, kwargs[name])
class ChromeTestCase(unittest.TestCase):
    def setUp(self):
        """Use a stub environment and an empty component registry so
        components declared inside tests don't leak between tests."""
        self.env = EnvironmentStub()
        from trac.core import ComponentMeta
        self._old_registry = ComponentMeta._registry
        ComponentMeta._registry = {}
    def tearDown(self):
        """Restore the global component registry saved in setUp."""
        from trac.core import ComponentMeta
        ComponentMeta._registry = self._old_registry
    def test_add_meta(self):
        """add_meta() records content plus name/scheme/lang or
        http-equiv attributes under req.chrome['metas']."""
        req = Request(href=Href('/trac.cgi'))
        add_meta(req, 'Jim Smith', name='Author', scheme='test', lang='en-us')
        add_meta(req, 'Tue, 20 Aug 1996 14:25:27 GMT', http_equiv='Expires')
        metas = req.chrome['metas']
        self.assertEqual(2, len(metas))
        meta = metas[0]
        self.assertEqual('Jim Smith', meta['content'])
        self.assertEqual('Author', meta['name'])
        self.assertEqual('test', meta['scheme'])
        self.assertEqual('en-us', meta['lang'])
        # lang is mirrored into the XHTML xml:lang attribute
        self.assertEqual('en-us', meta['xml:lang'])
        meta = metas[1]
        self.assertEqual('Tue, 20 Aug 1996 14:25:27 GMT', meta['content'])
        self.assertEqual('Expires', meta['http-equiv'])
def test_add_link_simple(self):
req = Request(href=Href('/trac.cgi'))
add_link(req, 'start', '/trac/wiki')
self.assertEqual('/trac/wiki',
req.chrome['links']['start'][0]['href'])
    def test_add_link_advanced(self):
        """add_link() keeps title, MIME type and class of the link."""
        req = Request(href=Href('/trac.cgi'))
        add_link(req, 'start', '/trac/wiki', 'Start page', 'text/html', 'home')
        link = req.chrome['links']['start'][0]
        self.assertEqual('/trac/wiki', link['href'])
        self.assertEqual('Start page', link['title'])
        self.assertEqual('text/html', link['type'])
        self.assertEqual('home', link['class'])
    def test_add_script(self):
        """add_script() deduplicates scripts and leaves absolute URLs
        untouched while prefixing chrome-relative paths."""
        req = Request(base_path='/trac.cgi', href=Href('/trac.cgi'))
        add_script(req, 'common/js/trac.js')
        add_script(req, 'common/js/trac.js')  # duplicate: added only once
        add_script(req, 'http://example.com/trac.js')
        scripts = req.chrome['scripts']
        self.assertEqual(2, len(scripts))
        self.assertEqual('text/javascript', scripts[0]['type'])
        self.assertEqual('/trac.cgi/chrome/common/js/trac.js',
                         scripts[0]['href'])
        self.assertEqual('text/javascript', scripts[1]['type'])
        self.assertEqual('http://example.com/trac.js',
                         scripts[1]['href'])
def test_add_script_data(self):
req = Request(href=Href('/trac.cgi'))
add_script_data(req, {'var1': 1, 'var2': 'Testing'})
add_script_data(req, var2='More testing', var3=3)
self.assertEqual({'var1': 1, 'var2': 'More testing', 'var3': 3},
req.chrome['script_data'])
    def test_add_stylesheet(self):
        """add_stylesheet() deduplicates sheets and leaves absolute
        URLs untouched while prefixing chrome-relative paths."""
        req = Request(base_path='/trac.cgi', href=Href('/trac.cgi'))
        add_stylesheet(req, 'common/css/trac.css')
        add_stylesheet(req, 'common/css/trac.css')  # duplicate: added once
        add_stylesheet(req, 'https://example.com/trac.css')
        links = req.chrome['links']['stylesheet']
        self.assertEqual(2, len(links))
        self.assertEqual('text/css', links[0]['type'])
        self.assertEqual('/trac.cgi/chrome/common/css/trac.css',
                         links[0]['href'])
        self.assertEqual('text/css', links[1]['type'])
        self.assertEqual('https://example.com/trac.css',
                         links[1]['href'])
    def test_add_stylesheet_media(self):
        """The optional media attribute is preserved on the link."""
        req = Request(base_path='/trac.cgi', href=Href('/trac.cgi'))
        add_stylesheet(req, 'foo.css', media='print')
        links = req.chrome['links']['stylesheet']
        self.assertEqual(1, len(links))
        self.assertEqual('print', links[0]['media'])
    def test_add_warning_is_unique(self):
        """Adding the same warning message twice stores it only once."""
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), base_path='/trac.cgi',
                      path_info='',
                      add_redirect_listener=lambda listener: None)
        Chrome(self.env).prepare_request(req)
        message = random_sentence(5)
        add_warning(req, message)
        add_warning(req, message)
        self.assertEqual(1, len(req.chrome['warnings']))
    def test_add_notice_is_unique(self):
        """Adding the same notice message twice stores it only once."""
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), base_path='/trac.cgi',
                      path_info='',
                      add_redirect_listener=lambda listener: None)
        Chrome(self.env).prepare_request(req)
        message = random_sentence(5)
        add_notice(req, message)
        add_notice(req, message)
        self.assertEqual(1, len(req.chrome['notices']))
    def test_htdocs_location(self):
        """htdocs_location defaults to the common chrome path."""
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), base_path='/trac.cgi',
                      path_info='',
                      add_redirect_listener=lambda listener: None)
        info = Chrome(self.env).prepare_request(req)
        self.assertEqual('/trac.cgi/chrome/common/', info['htdocs_location'])
    def test_logo(self):
        """The [header_logo] src option is resolved against common
        htdocs, project htdocs, the server root, or kept absolute."""
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), base_path='/trac.cgi',
                      path_info='',
                      add_redirect_listener=lambda listener: None)

        # Verify that no logo data is put in the HDF if no logo is configured
        self.env.config.set('header_logo', 'src', '')
        info = Chrome(self.env).prepare_request(req)
        assert 'src' not in info['logo']
        assert 'src_abs' not in info['logo']

        # Test with a relative path to the logo image
        self.env.config.set('header_logo', 'src', 'foo.png')
        info = Chrome(self.env).prepare_request(req)
        self.assertEqual('/trac.cgi/chrome/common/foo.png', info['logo']['src'])
        self.assertEqual('http://example.org/trac.cgi/chrome/common/foo.png',
                         info['logo']['src_abs'])

        # Test with a location in project htdocs
        self.env.config.set('header_logo', 'src', 'site/foo.png')
        info = Chrome(self.env).prepare_request(req)
        self.assertEqual('/trac.cgi/chrome/site/foo.png', info['logo']['src'])
        self.assertEqual('http://example.org/trac.cgi/chrome/site/foo.png',
                         info['logo']['src_abs'])

        # Test with a server-relative path to the logo image
        self.env.config.set('header_logo', 'src', '/img/foo.png')
        info = Chrome(self.env).prepare_request(req)
        self.assertEqual('/img/foo.png', info['logo']['src'])
        self.assertEqual('/img/foo.png', info['logo']['src_abs'])

        # Test with an absolute path to the logo image
        self.env.config.set('header_logo', 'src',
                            'http://www.example.org/foo.png')
        info = Chrome(self.env).prepare_request(req)
        self.assertEqual('http://www.example.org/foo.png', info['logo']['src'])
        self.assertEqual('http://www.example.org/foo.png', info['logo']['src_abs'])
    def test_default_links(self):
        """prepare_request() always provides start/search/help links and
        the default stylesheet."""
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), base_path='/trac.cgi',
                      path_info='',
                      add_redirect_listener=lambda listener: None)
        links = Chrome(self.env).prepare_request(req)['links']
        self.assertEqual('/trac.cgi/wiki', links['start'][0]['href'])
        self.assertEqual('/trac.cgi/search', links['search'][0]['href'])
        self.assertEqual('/trac.cgi/wiki/TracGuide', links['help'][0]['href'])
        self.assertEqual('/trac.cgi/chrome/common/css/trac.css',
                         links['stylesheet'][0]['href'])
    def test_icon_links(self):
        """Icon links follow the [project] icon option: absent when empty,
        resolved under the chrome static area when relative, and passed
        through unchanged when server-relative or absolute.
        """
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), base_path='/trac.cgi',
                      path_info='',
                      add_redirect_listener=lambda listener: None)
        chrome = Chrome(self.env)
        # No icon set in config, so no icon links
        self.env.config.set('project', 'icon', '')
        links = chrome.prepare_request(req)['links']
        assert 'icon' not in links
        assert 'shortcut icon' not in links
        # Relative URL for icon config option
        self.env.config.set('project', 'icon', 'foo.ico')
        links = chrome.prepare_request(req)['links']
        self.assertEqual('/trac.cgi/chrome/common/foo.ico',
                         links['icon'][0]['href'])
        self.assertEqual('/trac.cgi/chrome/common/foo.ico',
                         links['shortcut icon'][0]['href'])
        # URL relative to the server root for icon config option
        self.env.config.set('project', 'icon', '/favicon.ico')
        links = chrome.prepare_request(req)['links']
        self.assertEqual('/favicon.ico', links['icon'][0]['href'])
        self.assertEqual('/favicon.ico', links['shortcut icon'][0]['href'])
        # Absolute URL for icon config option
        self.env.config.set('project', 'icon', 'http://example.com/favicon.ico')
        links = chrome.prepare_request(req)['links']
        self.assertEqual('http://example.com/favicon.ico',
                         links['icon'][0]['href'])
        self.assertEqual('http://example.com/favicon.ico',
                         links['shortcut icon'][0]['href'])
    def test_nav_contributor(self):
        """Items yielded by an INavigationContributor show up in the 'nav'
        data; with no handler given, the item is not marked active.
        """
        # Declaring the Component inline registers it with the environment
        # via Trac's component machinery; no explicit instantiation needed.
        class TestNavigationContributor(Component):
            implements(INavigationContributor)
            def get_active_navigation_item(self, req):
                return None
            def get_navigation_items(self, req):
                yield 'metanav', 'test', 'Test'
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), path_info='/',
                      base_path='/trac.cgi',
                      add_redirect_listener=lambda listener: None)
        nav = Chrome(self.env).prepare_request(req)['nav']
        self.assertEqual({'name': 'test', 'label': 'Test', 'active': False},
                         nav['metanav'][0])
    def test_nav_contributor_active(self):
        """An item is flagged active when the request handler reports it as
        the active navigation item.
        """
        class TestNavigationContributor(Component):
            implements(INavigationContributor)
            def get_active_navigation_item(self, req):
                return 'test'
            def get_navigation_items(self, req):
                yield 'metanav', 'test', 'Test'
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), path_info='/',
                      base_path='/trac.cgi',
                      add_redirect_listener=lambda listener: None)
        # Pass the contributor itself as the handler so its
        # get_active_navigation_item() determines the active flag.
        handler = TestNavigationContributor(self.env)
        nav = Chrome(self.env).prepare_request(req, handler)['nav']
        self.assertEqual({'name': 'test', 'label': 'Test', 'active': True},
                         nav['metanav'][0])
    def test_nav_contributor_order(self):
        """The [trac] metanav option controls item ordering; items not
        listed in the option come after the listed ones, in registration
        order.
        """
        class TestNavigationContributor1(Component):
            implements(INavigationContributor)
            def get_active_navigation_item(self, req):
                return None
            def get_navigation_items(self, req):
                yield 'metanav', 'test1', 'Test 1'
        class TestNavigationContributor2(Component):
            implements(INavigationContributor)
            def get_active_navigation_item(self, req):
                return None
            def get_navigation_items(self, req):
                yield 'metanav', 'test2', 'Test 2'
        req = Request(abs_href=Href('http://example.org/trac.cgi'),
                      href=Href('/trac.cgi'), base_path='/trac.cgi',
                      path_info='/',
                      add_redirect_listener=lambda listener: None)
        chrome = Chrome(self.env)
        # Test with both items set in the order option
        self.env.config.set('trac', 'metanav', 'test2, test1')
        items = chrome.prepare_request(req)['nav']['metanav']
        self.assertEqual('test2', items[0]['name'])
        self.assertEqual('test1', items[1]['name'])
        # Test with only test1 in the order options
        self.env.config.set('trac', 'metanav', 'test1')
        items = chrome.prepare_request(req)['nav']['metanav']
        self.assertEqual('test1', items[0]['name'])
        self.assertEqual('test2', items[1]['name'])
        # Test with only test2 in the order options
        self.env.config.set('trac', 'metanav', 'test2')
        items = chrome.prepare_request(req)['nav']['metanav']
        self.assertEqual('test2', items[0]['name'])
        self.assertEqual('test1', items[1]['name'])
        # Test with none in the order options (order corresponds to
        # registration order)
        self.env.config.set('trac', 'metanav', 'foo, bar')
        items = chrome.prepare_request(req)['nav']['metanav']
        self.assertEqual('test1', items[0]['name'])
        self.assertEqual('test2', items[1]['name'])
def suite():
    """Collect every test* method of ChromeTestCase into a suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(ChromeTestCase)
if __name__ == '__main__':
    # Allow running this test module standalone.
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/__init__.py | trac/trac/web/tests/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import unittest
from trac.web.tests import api, auth, cgi_frontend, chrome, href, session, \
wikisyntax, main
def suite():
    """Aggregate all trac.web test modules into a single suite.

    The order matches the import list above.
    """
    combined = unittest.TestSuite()
    for module in (api, auth, cgi_frontend, chrome, href, session,
                   wikisyntax, main):
        combined.addTest(module.suite())
    return combined
if __name__ == '__main__':
    # Allow running the whole trac.web test package standalone.
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/session.py | trac/trac/web/tests/session.py | from __future__ import with_statement
from Cookie import SimpleCookie as Cookie
import time
from datetime import datetime
import unittest
from trac.test import EnvironmentStub, Mock
from trac.web.session import DetachedSession, Session, PURGE_AGE, \
UPDATE_INTERVAL, SessionAdmin
from trac.core import TracError
def _prep_session_table(env, spread_visits=False):
    """Populate the session tables with a predictable set of rows.

    Twenty sessions are created: sids name00..name09 authenticated,
    name10..name19 anonymous.  Each gets 'name' and 'email' attributes
    set to valXX.  With `spread_visits`, last_visit advances one day per
    session instead of staying constant.

    :return: a tuple of lists `(auth_list, anon_list, all_list)`
    :since 1.0: changed `db` input parameter to `env`
    """
    with env.db_transaction as db:
        db("DELETE FROM session")
        db("DELETE FROM session_attribute")
    base_visit = time.mktime(datetime(2010, 1, 1).timetuple())
    step = 86400 if spread_visits else 0
    auth_list = []
    anon_list = []
    with env.db_transaction as db:
        for idx in xrange(20):
            sid = 'name%02d' % idx
            authenticated = int(idx < 10)
            last_visit = base_visit + step * idx
            value = 'val%02d' % idx
            row = (sid, authenticated, last_visit, value, value)
            # Authenticated rows come first (idx < 10), anonymous after.
            (auth_list if authenticated else anon_list).append(row)
            db("INSERT INTO session VALUES (%s, %s, %s)",
               (sid, authenticated, last_visit))
            db("INSERT INTO session_attribute VALUES (%s, %s, 'name', %s)",
               (sid, authenticated, value))
            db("INSERT INTO session_attribute VALUES (%s, %s, 'email', %s)",
               (sid, authenticated, value))
    return (auth_list, anon_list, auth_list + anon_list)
def get_session_info(env, sid):
    """Return `(sid, name, email)` for the given session id, or a triple
    of `None`s when no such session exists.

    :since 1.0: changed `db` input parameter to `env`
    """
    rows = env.db_query("""
        SELECT DISTINCT s.sid, n.value, e.value FROM session AS s
        LEFT JOIN session_attribute AS n ON (n.sid=s.sid AND n.name='name')
        LEFT JOIN session_attribute AS e ON (e.sid=s.sid AND e.name='email')
        WHERE s.sid=%s
        """, (sid,))
    if rows:
        return rows[0]
    return (None, None, None)
class SessionTestCase(unittest.TestCase):
    """Unit tests for the persistent session support."""
    def setUp(self):
        # Each test runs against a fresh stub environment.
        self.env = EnvironmentStub()
    def tearDown(self):
        self.env.reset_db()
    def test_new_session(self):
        """
        Verify that a session cookie gets sent back to the client for a new
        session.
        """
        cookie = Cookie()
        req = Mock(incookie=Cookie(), outcookie=cookie, authname='anonymous',
                   base_path='/')
        session = Session(self.env, req)
        self.assertEqual(session.sid, cookie['trac_session'].value)
        # A brand-new, unsaved session must not create any database row.
        self.assertEqual(0, self.env.db_query(
            "SELECT COUNT(*) FROM session")[0][0])
    def test_anonymous_session(self):
        """
        Verify that session variables are stored in the database.
        """
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        outcookie = Cookie()
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=outcookie)
        session = Session(self.env, req)
        self.assertEquals('123456', session.sid)
        self.failIf(outcookie.has_key('trac_session'))
    def test_authenticated_session(self):
        """
        Verifies that a session cookie does not get used if the user is logged
        in, and that Trac expires the cookie.
        """
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        outcookie = Cookie()
        req = Mock(authname='john', base_path='/', incookie=incookie,
                   outcookie=outcookie)
        session = Session(self.env, req)
        self.assertEqual('john', session.sid)
        session['foo'] = 'bar'
        session.save()
        # expires == 0 means the anonymous session cookie is invalidated.
        self.assertEquals(0, outcookie['trac_session']['expires'])
    def test_session_promotion(self):
        """
        Verifies that an existing anonymous session gets promoted to an
        authenticated session when the user logs in.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('123456', 0, 0)")
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        outcookie = Cookie()
        req = Mock(authname='john', base_path='/', incookie=incookie,
                   outcookie=outcookie)
        session = Session(self.env, req)
        self.assertEqual('john', session.sid)
        session.save()
        self.assertEqual([('john', 1)], self.env.db_query(
            "SELECT sid, authenticated FROM session"))
    def test_new_session_promotion(self):
        """
        Verifies that even without a preexisting anonymous session,
        an authenticated session will be created when the user logs in.
        (same test as above without the initial INSERT)
        """
        with self.env.db_transaction as db:
            incookie = Cookie()
            incookie['trac_session'] = '123456'
            outcookie = Cookie()
            req = Mock(authname='john', base_path='/', incookie=incookie,
                       outcookie=outcookie)
            session = Session(self.env, req)
            self.assertEqual('john', session.sid)
            session.save()
        self.assertEqual([('john', 1)], self.env.db_query(
            "SELECT sid, authenticated FROM session"))
    def test_add_anonymous_session_var(self):
        """
        Verify that new variables are inserted into the 'session' table in the
        database for an anonymous session.
        """
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=Cookie())
        session = Session(self.env, req)
        session['foo'] = 'bar'
        session.save()
        self.assertEqual('bar', self.env.db_query(
            "SELECT value FROM session_attribute WHERE sid='123456'")[0][0])
    def test_modify_anonymous_session_var(self):
        """
        Verify that modifying an existing variable updates the 'session' table
        accordingly for an anonymous session.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('123456', 0, 0)")
            db("""
                INSERT INTO session_attribute VALUES
                ('123456', 0, 'foo', 'bar')
                """)
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=Cookie())
        session = Session(self.env, req)
        self.assertEqual('bar', session['foo'])
        session['foo'] = 'baz'
        session.save()
        self.assertEqual('baz', self.env.db_query(
            "SELECT value FROM session_attribute WHERE sid='123456'")[0][0])
    def test_delete_anonymous_session_var(self):
        """
        Verify that modifying a variable updates the 'session' table accordingly
        for an anonymous session.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('123456', 0, 0)")
            db("""
                INSERT INTO session_attribute VALUES
                ('123456', 0, 'foo', 'bar')
                """)
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=Cookie())
        session = Session(self.env, req)
        self.assertEqual('bar', session['foo'])
        del session['foo']
        session.save()
        self.assertEqual(0, self.env.db_query("""
            SELECT COUNT(*) FROM session_attribute
            WHERE sid='123456' AND name='foo'
            """)[0][0])
    def test_purge_anonymous_session(self):
        """
        Verify that old sessions get purged.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('123456', 0, %s)", (0,))
            # Session '987654' is older than PURGE_AGE, so saving another
            # session should sweep it (and its attributes) away.
            db("INSERT INTO session VALUES ('987654', 0, %s)",
               (int(time.time() - PURGE_AGE - 3600),))
            db("""
                INSERT INTO session_attribute
                VALUES ('987654', 0, 'foo', 'bar')
                """)
        # We need to modify a different session to trigger the purging
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=Cookie())
        session = Session(self.env, req)
        session['foo'] = 'bar'
        session.save()
        self.assertEqual(0, self.env.db_query("""
            SELECT COUNT(*) FROM session WHERE sid='987654' AND authenticated=0
            """)[0][0])
    def test_delete_empty_session(self):
        """
        Verify that a session gets deleted when it doesn't have any data except
        for the 'last_visit' timestamp.
        """
        now = time.time()
        # Make sure the session has data so that it doesn't get dropped
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('123456', 0, %s)",
               (int(now - UPDATE_INTERVAL - 3600),))
            db("""
                INSERT INTO session_attribute
                VALUES ('123456', 0, 'foo', 'bar')
                """)
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=Cookie())
        session = Session(self.env, req)
        # Removing the only attribute leaves an empty session, which should
        # be dropped entirely on save.
        del session['foo']
        session.save()
        self.assertEqual(0, self.env.db_query("""
            SELECT COUNT(*) FROM session WHERE sid='123456' AND authenticated=0
            """)[0][0])
    def test_change_anonymous_session(self):
        """
        Verify that changing from one anonymous session to an inexisting
        anonymous session creates the new session and doesn't carry over
        variables from the previous session.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('123456', 0, 0)")
            db("""
                INSERT INTO session_attribute
                VALUES ('123456', 0, 'foo', 'bar')
                """)
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=Cookie())
        session = Session(self.env, req)
        self.assertEqual({'foo': 'bar'}, session)
        # Switch to a session id that does not exist yet.
        session.get_session('7890')
        session['baz'] = 'moo'
        session.save()
        self.assertEqual({'baz': 'moo'}, session)
        with self.env.db_query as db:
            self.assertEqual(1, db("""
                SELECT COUNT(*) FROM session
                WHERE sid='7890' AND authenticated=0
                """)[0][0])
            self.assertEqual([('baz', 'moo')], db("""
                SELECT name, value FROM session_attribute
                WHERE sid='7890' AND authenticated=0
                """))
    def test_add_authenticated_session_var(self):
        """
        Verify that new variables are inserted into the 'session' table in the
        database for an authenticated session.
        """
        req = Mock(authname='john', base_path='/', incookie=Cookie())
        session = Session(self.env, req)
        session['foo'] = 'bar'
        session.save()
        self.assertEqual('bar', self.env.db_query("""
            SELECT value FROM session_attribute WHERE sid='john' AND name='foo'
            """)[0][0])
    def test_modify_authenticated_session_var(self):
        """
        Verify that modifying an existing variable updates the 'session' table
        accordingly for an authenticated session.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('john', 1, 0)")
            db("INSERT INTO session_attribute VALUES ('john',1,'foo','bar')")
        req = Mock(authname='john', base_path='/', incookie=Cookie())
        session = Session(self.env, req)
        self.assertEqual('bar', session['foo'])
        session['foo'] = 'baz'
        session.save()
        self.assertEqual('baz', self.env.db_query("""
            SELECT value FROM session_attribute WHERE sid='john' AND name='foo'
            """)[0][0])
    def test_authenticated_session_independence_var(self):
        """
        Verify that an anonymous session with the same name as an authenticated
        session doesn't interfere with the latter.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('john', 1, 0)")
            db("INSERT INTO session_attribute VALUES ('john',1,'foo','bar')")
        self.assertEqual('bar', self.env.db_query("""
            SELECT value FROM session_attribute
            WHERE sid='john' AND authenticated=1 AND name='foo'
            """)[0][0])
        incookie = Cookie()
        incookie['trac_session'] = 'john'
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=Cookie())
        session = Session(self.env, req)
        # The anonymous session must not see the authenticated attribute.
        self.assert_('foo' not in session)
        session['foo'] = 'baz'
        session.save()
        # Authenticated row unchanged...
        rows = self.env.db_query("""
            SELECT value FROM session_attribute
            WHERE sid='john' AND authenticated=1 AND name='foo'
            """)
        self.assertEqual(1, len(rows))
        self.assertEqual('bar', rows[0][0])
        # ...while the anonymous row carries the new value.
        rows = self.env.db_query("""
            SELECT value FROM session_attribute
            WHERE sid='john' AND authenticated=0 AND name='foo'
            """)
        self.assertEqual(1, len(rows))
        self.assertEqual('baz', rows[0][0])
    def test_delete_authenticated_session_var(self):
        """
        Verify that deleting a variable updates the 'session' table accordingly
        for an authenticated session.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('john', 1, 0)")
            db("INSERT INTO session_attribute VALUES ('john', 1, 'foo', 'bar')")
        req = Mock(authname='john', base_path='/', incookie=Cookie())
        session = Session(self.env, req)
        self.assertEqual('bar', session['foo'])
        del session['foo']
        session.save()
        self.assertEqual(0, self.env.db_query("""
            SELECT COUNT(*) FROM session_attribute
            WHERE sid='john' AND name='foo'
            """)[0][0])
    def test_update_session(self):
        """
        Verify that accessing a session after one day updates the sessions
        'last_visit' variable so that the session doesn't get purged.
        """
        now = time.time()
        # Make sure the session has data so that it doesn't get dropped
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('123456', 0, 1)")
            db("""
                INSERT INTO session_attribute
                VALUES ('123456', 0, 'foo', 'bar')
                """)
        incookie = Cookie()
        incookie['trac_session'] = '123456'
        outcookie = Cookie()
        req = Mock(authname='anonymous', base_path='/', incookie=incookie,
                   outcookie=outcookie)
        session = Session(self.env, req)
        session['modified'] = True
        session.save() # updating does require modifications
        self.assertEqual(PURGE_AGE, outcookie['trac_session']['expires'])
        # last_visit should have been refreshed to roughly "now"
        # (places=-1 allows ~5s of slack).
        self.assertAlmostEqual(now, int(self.env.db_query("""
            SELECT last_visit FROM session
            WHERE sid='123456' AND authenticated=0
            """)[0][0]), -1)
    def test_modify_detached_session(self):
        """
        Verify that modifying a variable in a session not associated with a
        request updates the database accordingly.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('john', 1, 0)")
            db("INSERT INTO session_attribute VALUES ('john', 1, 'foo', 'bar')")
        session = DetachedSession(self.env, 'john')
        self.assertEqual('bar', session['foo'])
        session['foo'] = 'baz'
        session.save()
        self.assertEqual('baz', self.env.db_query("""
            SELECT value FROM session_attribute WHERE sid='john' AND name='foo'
            """)[0][0])
    def test_delete_detached_session_var(self):
        """
        Verify that removing a variable in a session not associated with a
        request deletes the variable from the database.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('john', 1, 0)")
            db("INSERT INTO session_attribute VALUES ('john', 1, 'foo', 'bar')")
        session = DetachedSession(self.env, 'john')
        self.assertEqual('bar', session['foo'])
        del session['foo']
        session.save()
        self.assertEqual(0, self.env.db_query("""
            SELECT COUNT(*) FROM session_attribute
            WHERE sid='john' AND name='foo'
            """)[0][0])
    def test_session_set(self):
        """Verify that setting a variable in a session to the default value
        removes it from the session.
        """
        with self.env.db_transaction as db:
            db("INSERT INTO session VALUES ('john', 1, 0)")
            db("INSERT INTO session_attribute VALUES ('john', 1, 'foo', 'bar')")
        session = DetachedSession(self.env, 'john')
        self.assertEqual('bar', session['foo'])
        # Setting the variable to the default value removes the variable
        with self.env.db_transaction as db:
            session.set('foo', 'default', 'default')
            session.save()
        self.assertEqual(0, self.env.db_query("""
            SELECT COUNT(*) FROM session_attribute
            WHERE sid='john' AND name='foo'
            """)[0][0])
        # Setting the variable to a value different from the default sets it
        with self.env.db_transaction as db:
            session.set('foo', 'something', 'default')
            session.save()
        self.assertEqual('something', self.env.db_query("""
            SELECT value FROM session_attribute
            WHERE sid='john' AND name='foo'
            """)[0][0])
    def test_session_admin_list(self):
        """`session list` filters: 'authenticated', 'anonymous', '*',
        explicit sids and 'sid:0' (anonymous) qualifiers."""
        auth_list, anon_list, all_list = _prep_session_table(self.env)
        sess_admin = SessionAdmin(self.env)
        # Verify the empty case
        self.assertRaises(StopIteration, sess_admin._get_list([]).next)
        self.assertEqual([i for i in sess_admin._get_list(['authenticated'])],
                         auth_list)
        self.assertEqual([i for i in sess_admin._get_list(['anonymous'])],
                         anon_list)
        self.assertEqual([i for i in sess_admin._get_list(['*'])], all_list)
        self.assertEqual([i for i in sess_admin._get_list(['name00'])][0],
                         auth_list[0])
        self.assertEqual([i for i in sess_admin._get_list(['name10:0'])][0],
                         anon_list[0])
        self.assertEqual([i for i in sess_admin._get_list(['name00', 'name01',
                                                           'name02'])],
                         all_list[:3])
    def test_session_admin_add(self):
        """`session add` creates sessions with optional name and email;
        adding an existing sid fails."""
        auth_list, anon_list, all_list = _prep_session_table(self.env)
        sess_admin = SessionAdmin(self.env)
        self.assertRaises(Exception, sess_admin._do_add, 'name00')
        sess_admin._do_add('john')
        result = get_session_info(self.env, 'john')
        self.assertEqual(result, ('john', None, None))
        sess_admin._do_add('john1', 'John1')
        result = get_session_info(self.env, 'john1')
        self.assertEqual(result, ('john1', 'John1', None))
        sess_admin._do_add('john2', 'John2', 'john2@example.org')
        result = get_session_info(self.env, 'john2')
        self.assertEqual(result, ('john2', 'John2', 'john2@example.org'))
    def test_session_admin_set(self):
        """`session set` updates the name/email attributes of an existing
        session and raises TracError for an unknown sid."""
        auth_list, anon_list, all_list = _prep_session_table(self.env)
        sess_admin = SessionAdmin(self.env)
        self.assertRaises(TracError, sess_admin._do_set, 'name', 'nothere',
                          'foo')
        sess_admin._do_set('name', 'name00', 'john')
        result = get_session_info(self.env, 'name00')
        self.assertEqual(result, ('name00', 'john', 'val00'))
        sess_admin._do_set('email', 'name00', 'john@example.org')
        result = get_session_info(self.env, 'name00')
        self.assertEqual(result, ('name00', 'john', 'john@example.org'))
    def test_session_admin_delete(self):
        """`session delete` removes a single session (silently ignoring
        unknown sids) and 'anonymous' removes all anonymous sessions."""
        auth_list, anon_list, all_list = _prep_session_table(self.env)
        sess_admin = SessionAdmin(self.env)
        sess_admin._do_delete('name00')
        result = get_session_info(self.env, 'name00')
        self.assertEqual(result, (None, None, None))
        sess_admin._do_delete('nothere')
        result = get_session_info(self.env, 'nothere')
        self.assertEqual(result, (None, None, None))
        auth_list, anon_list, all_list = _prep_session_table(self.env)
        sess_admin._do_delete('anonymous')
        result = [i for i in sess_admin._get_list(['*'])]
        self.assertEqual(result, auth_list)
    def test_session_admin_purge(self):
        """`session purge <date>` removes only anonymous sessions whose
        last visit predates the given date; authenticated ones are kept."""
        sess_admin = SessionAdmin(self.env)
        # Purge date before every visit: nothing is removed.
        auth_list, anon_list, all_list = \
            _prep_session_table(self.env, spread_visits=True)
        sess_admin._do_purge('2010-01-02')
        result = [i for i in sess_admin._get_list(['*'])]
        self.assertEqual(result, auth_list + anon_list)
        result = get_session_info(self.env, anon_list[0][0])
        self.assertEqual(result, ('name10', 'val10', 'val10'))
        result = get_session_info(self.env, anon_list[1][0])
        self.assertEqual(result, ('name11', 'val11', 'val11'))
        # Purge date between name10's and name11's visits: only name10
        # (and its attributes) is removed.
        auth_list, anon_list, all_list = \
            _prep_session_table(self.env, spread_visits=True)
        sess_admin._do_purge('2010-01-12')
        result = [i for i in sess_admin._get_list(['*'])]
        self.assertEqual(result, auth_list + anon_list[1:])
        rows = self.env.db_query("""
            SELECT name, value FROM session_attribute WHERE sid = %s
            """, (anon_list[0][0],))
        self.assertEqual([], rows)
        result = get_session_info(self.env, anon_list[1][0])
        self.assertEqual(result, ('name11', 'val11', 'val11'))
def suite():
    """Collect every test* method of SessionTestCase into a suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(SessionTestCase)
if __name__ == '__main__':
    # Allow running this test module standalone.
    unittest.main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/cgi_frontend.py | trac/trac/web/tests/cgi_frontend.py | #from trac.web.cgi_frontend import CGIRequest
import unittest
class CGIRequestTestCase(unittest.TestCase):
    """Placeholder test case for the CGI frontend; no tests defined yet
    (the CGIRequest import at the top of the file is commented out).
    """
    pass
def suite():
    """Return the (currently empty) CGI frontend test suite."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromTestCase(CGIRequestTestCase)
if __name__ == '__main__':
    # Allow running this test module standalone.
    unittest.main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/href.py | trac/trac/web/tests/href.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import doctest
import unittest
import trac.web.href
class HrefTestCase(unittest.TestCase):
    """Unit tests for Href URL builder.

    Keyword arguments become query-string parameters; since their order
    follows dict iteration order, tests accept either permutation where
    two parameters are given.
    """
    def test_non_empty_base(self):
        """Build URLs with a non-empty base."""
        href = trac.web.href.Href('/base')
        self.assertEqual('/base', href())
        self.assertEqual('/base', href('/'))
        self.assertEqual('/base/sub', href('sub'))
        self.assertEqual('/base/sub', href('/sub/'))
        self.assertEqual('/base/sub/other', href('sub', 'other'))
        # None and '' positional components are dropped...
        self.assertEqual('/base/sub/other', href('sub', None, 'other'))
        self.assertEqual('/base/sub/other', href('sub', '', 'other'))
        self.assertEqual('/base/sub/other', href('sub', '', '', 'other'))
        self.assertEqual('/base/sub/other', href('', 'sub', 'other'))
        # ...except a trailing '' which yields a trailing slash.
        self.assertEqual('/base/sub/other/', href('sub', 'other', ''))
        self.assertEqual('/base/with%20special%26chars',
                         href('with special&chars'))
        assert href('page', param='value', other='other value', more=None) in [
            '/base/page?param=value&other=other+value',
            '/base/page?other=other+value&param=value']
        self.assertEqual('/base/page?param=multiple&param=values',
                         href('page', param=['multiple', 'values']))
        self.assertEqual('/base/path/to/file/', href + '/path/to/file/')
        self.assertEqual('/base/path/to/file', href + 'path/to/file')
        self.assertEqual('/base', href + '')
    def test_base_with_trailing_slash(self):
        """Build URLs with a base with a trailing slash."""
        href = trac.web.href.Href('/base/')
        self.assertEqual('/base', href())
        self.assertEqual('/base', href('/'))
        self.assertEqual('/base/sub', href('sub'))
        self.assertEqual('/base/sub', href('/sub/'))
        self.assertEqual('/base/path/to/file/', href + '/path/to/file/')
        self.assertEqual('/base/path/to/file', href + 'path/to/file')
        self.assertEqual('/base', href + '')
    def test_empty_base(self):
        """Build URLs with an empty base."""
        href = trac.web.href.Href('')
        self.assertEqual('/', href())
        self.assertEqual('/', href('/'))
        self.assertEqual('/sub', href('sub'))
        self.assertEqual('/sub', href('/sub/'))
        self.assertEqual('/sub/other', href('sub', 'other'))
        self.assertEqual('/sub/other', href('sub', None, 'other'))
        self.assertEqual('/sub/other', href('sub', '', 'other'))
        self.assertEqual('/sub/other', href('sub', '', '', 'other'))
        self.assertEqual('/sub/other', href('', 'sub', 'other'))
        self.assertEqual('/sub/other/', href('sub', 'other', ''))
        self.assertEqual('/with%20special%26chars',
                         href('with special&chars'))
        assert href('page', param='value', other='other value', more=None) in [
            '/page?param=value&other=other+value',
            '/page?other=other+value&param=value']
        self.assertEqual('/page?param=multiple&param=values',
                         href('page', param=['multiple', 'values']))
        self.assertEqual('/path/to/file/', href + '/path/to/file/')
        self.assertEqual('/path/to/file', href + 'path/to/file')
        self.assertEqual('/', href + '')
    def test_params_subclasses(self):
        """Parameters passed using subclasses of dict, list and tuple."""
        class MyDict(dict):
            pass
        class MyList(list):
            pass
        class MyTuple(tuple):
            pass
        href = trac.web.href.Href('/base')
        self.assertEqual('/base?param=test&param=other',
                         href(param=MyList(['test', 'other'])))
        self.assertEqual('/base?param=test&param=other',
                         href(param=MyTuple(['test', 'other'])))
        assert href(MyDict(param='value', other='other value')) in [
            '/base?param=value&other=other+value',
            '/base?other=other+value&param=value']
        self.assertEqual('/base?param=value&other=other+value',
            href(MyList([('param', 'value'), ('other', 'other value')])))
        self.assertEqual('/base?param=value&other=other+value',
            href(MyTuple([('param', 'value'), ('other', 'other value')])))
def suite():
    """Combine the trac.web.href doctests with the unit tests above."""
    combined = unittest.TestSuite()
    combined.addTests([doctest.DocTestSuite(trac.web.href),
                       unittest.makeSuite(HrefTestCase, 'test')])
    return combined
if __name__ == '__main__':
    # Allow running this test module standalone.
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/trac/web/tests/auth.py | trac/trac/web/tests/auth.py | # -*- coding: utf-8 -*-
import os
from trac.core import TracError
from trac.test import EnvironmentStub, Mock
from trac.web.auth import BasicAuthentication, LoginModule
from trac.web.href import Href
from Cookie import SimpleCookie as Cookie
import unittest
class LoginModuleTestCase(unittest.TestCase):
    def setUp(self):
        # Fresh stub environment with the LoginModule under test.
        self.env = EnvironmentStub()
        self.module = LoginModule(self.env)
    def tearDown(self):
        # Drop all rows created by the test (e.g. auth_cookie entries).
        self.env.reset_db()
    def test_anonymous_access(self):
        """A request with no auth cookie and no remote user stays
        unauthenticated (authenticate() returns None)."""
        req = Mock(incookie=Cookie(), href=Href('/trac.cgi'),
                   remote_addr='127.0.0.1', remote_user=None,
                   base_path='/trac.cgi')
        self.assertEqual(None, self.module.authenticate(req))
    def test_unknown_cookie_access(self):
        """A 'trac_auth' cookie with no matching auth_cookie row does not
        authenticate the request."""
        incookie = Cookie()
        incookie['trac_auth'] = '123'
        req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
                   incookie=incookie, outcookie=Cookie(),
                   remote_addr='127.0.0.1', remote_user=None,
                   base_path='/trac.cgi')
        self.assertEqual(None, self.module.authenticate(req))
def test_known_cookie_access(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(incookie=incookie, outcookie=outcookie,
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user=None)
self.assertEqual('john', self.module.authenticate(req))
self.failIf('auth_cookie' in req.outcookie)
    def test_known_cookie_ip_check_enabled(self):
        """With check_auth_ip enabled, a known cookie presented from a
        different IP is rejected and the cookie is expired in the response.
        """
        self.env.config.set('trac', 'check_auth_ip', 'yes')
        self.env.db_transaction("""
            INSERT INTO auth_cookie (cookie, name, ipnr)
            VALUES ('123', 'john', '127.0.0.1')""")
        incookie = Cookie()
        incookie['trac_auth'] = '123'
        outcookie = Cookie()
        # Note the remote_addr differs from the stored ipnr above.
        req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
                   incookie=incookie, outcookie=outcookie,
                   remote_addr='192.168.0.100', remote_user=None,
                   base_path='/trac.cgi')
        self.assertEqual(None, self.module.authenticate(req))
        # The stale cookie must be expired via the response cookie.
        self.failIf('trac_auth' not in req.outcookie)
def test_known_cookie_ip_check_disabled(self):
self.env.config.set('trac', 'check_auth_ip', 'no')
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(incookie=incookie, outcookie=outcookie,
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='192.168.0.100', remote_user=None)
self.assertEqual('john', self.module.authenticate(req))
self.failIf('auth_cookie' in req.outcookie)
    def test_login(self):
        """A successful login sets a 'trac_auth' cookie and records a
        matching (name, ipnr) row in the auth_cookie table.
        """
        outcookie = Cookie()
        # remote_user must be upper case to test that by default, case is
        # preserved.
        # NOTE(review): remote_user is actually lower case here, so the
        # case-preservation aspect mentioned above may not be exercised —
        # confirm whether 'John' was intended.
        req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
                   incookie=Cookie(), outcookie=outcookie,
                   remote_addr='127.0.0.1', remote_user='john',
                   authname='john', base_path='/trac.cgi')
        self.module._do_login(req)
        assert outcookie.has_key('trac_auth'), '"trac_auth" Cookie not set'
        auth_cookie = outcookie['trac_auth'].value
        self.assertEquals([('john', '127.0.0.1')], self.env.db_query(
            "SELECT name, ipnr FROM auth_cookie WHERE cookie=%s",
            (auth_cookie,)))
def test_login_ignore_case(self):
"""
Test that login is succesful when the usernames differ in case, but case
is ignored.
"""
self.env.config.set('trac', 'ignore_auth_case', 'yes')
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=outcookie,
remote_addr='127.0.0.1', remote_user='John',
authname='anonymous', base_path='/trac.cgi')
self.module._do_login(req)
assert outcookie.has_key('trac_auth'), '"trac_auth" Cookie not set'
auth_cookie = outcookie['trac_auth'].value
self.assertEquals([('john', '127.0.0.1')], self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE cookie=%s",
(auth_cookie,)))
def test_login_no_username(self):
req = Mock(incookie=Cookie(), href=Href('/trac.cgi'),
remote_addr='127.0.0.1', remote_user=None,
base_path='/trac.cgi')
self.assertRaises(TracError, self.module._do_login, req)
def test_already_logged_in_same_user(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(incookie=incookie, outcookie=Cookie(),
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user='john', authname='john')
self.module._do_login(req) # this shouldn't raise an error
def test_already_logged_in_different_user(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
req = Mock(incookie=incookie, authname='john',
href=Href('/trac.cgi'), base_path='/trac.cgi',
remote_addr='127.0.0.1', remote_user='tom')
self.assertRaises(AssertionError, self.module._do_login, req)
def test_logout(self):
self.env.db_transaction("""
INSERT INTO auth_cookie (cookie, name, ipnr)
VALUES ('123', 'john', '127.0.0.1')""")
incookie = Cookie()
incookie['trac_auth'] = '123'
outcookie = Cookie()
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=incookie, outcookie=outcookie,
remote_addr='127.0.0.1', remote_user=None, authname='john',
base_path='/trac.cgi')
self.module._do_logout(req)
self.failIf('trac_auth' not in outcookie)
self.failIf(self.env.db_query(
"SELECT name, ipnr FROM auth_cookie WHERE name='john'"))
def test_logout_not_logged_in(self):
req = Mock(cgi_location='/trac', href=Href('/trac.cgi'),
incookie=Cookie(), outcookie=Cookie(),
remote_addr='127.0.0.1', remote_user=None,
authname='anonymous', base_path='/trac.cgi')
self.module._do_logout(req) # this shouldn't raise an error
class BasicAuthenticationTestCase(unittest.TestCase):
    """Verify password checking against the bundled htpasswd.txt file,
    one test per supported hash scheme (crypt, md5, sha)."""

    def setUp(self):
        filename = os.path.join(os.path.split(__file__)[0], 'htpasswd.txt')
        self.auth = BasicAuthentication(filename, 'realm')

    def tearDown(self):
        self.auth = None

    # assertTrue/assertFalse instead of the deprecated assert_ alias.
    def test_crypt(self):
        self.assertTrue(self.auth.test('crypt', 'crypt'))
        self.assertFalse(self.auth.test('crypt', 'other'))

    def test_md5(self):
        self.assertTrue(self.auth.test('md5', 'md5'))
        self.assertFalse(self.auth.test('md5', 'other'))

    def test_sha(self):
        self.assertTrue(self.auth.test('sha', 'sha'))
        self.assertFalse(self.auth.test('sha', 'other'))
def suite():
    """Assemble the test suite for this module."""
    tests = unittest.TestSuite()
    for case in (LoginModuleTestCase, BasicAuthenticationTestCase):
        tests.addTest(unittest.makeSuite(case, 'test'))
    return tests
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/sourceforge2trac.py | trac/contrib/sourceforge2trac.py | """
Import a Sourceforge project's tracker items into a Trac database.
Requires:
Trac 1.0 from http://trac.edgewall.org/
Python 2.5 from http://www.python.org/
1.0 clean-up by cboos **untested**, use at your own risks and send patches
The Sourceforge tracker items can be exported from the 'Backup' page
of the project admin section. Substitute XXXXX with project id:
https://sourceforge.net/export/xml_export2.php?group_id=XXXXX
Initial version for Trac 0.7 and old artifact SF export format is
Copyright 2004, Mark Rowe <mrowe@bluewire.net.nz>
Version for Trac 0.11 and SF XML2 export format, completely rewritten
except TracDatabase class is
Copyright 2010, anatoly techtonik <techtonik@php.net>
HGID: 92fd15e8398c
$Id$
Uses Trac 0.11 DB format version 21
SourceForge XML Export format identified by the header:
<!DOCTYPE project_export SYSTEM "http://sourceforge.net/export/sf_project_export_0.2.dtd">
Works with all DB backends. Attachments are not downloaded, but inserted
as links to SF tracker.
Ticket Types, Priorities and Resolutions
----------------------------------------
Conversion kills default Trac ticket types:
- defect 1
- enhancement 2
- task 3
and priorities:
- blocker 1
- critical 2
- major 3
- minor 4
- trivial 5
and resolutions:
- fixed 1
- invalid 2
- wontfix 3
- duplicate 4
- worksforme 5
Versions and Milestones
-----------------------
Kills versions and milestones from existing Trac DB
Mapping
-------
tracker_name == ticket_type
group_name == version
category_name == component
user nobody == anonymous
Not implemented (feature:reason)
--------------------------------
attachments:made as a comment with links to attachments stored on SF
(type,id,filename,size,time,description,author,ipnr)
ticket_custom:unknown (ticket,name,value)
history:imported only for summary, priority. closed date and owner fields
severities:no field in source data
"""
#: rename users from SF to Trac
user_map = {"nobody":"anonymous"}
complete_msg = """
Conversion complete.
You may want to login into Trac to verify names for ticket owners. You may
also want to rename ticket types and priorities to default.
"""
from __future__ import with_statement
from xml.etree.ElementTree import ElementTree
import time
import sys
import trac.env
# --- utility
class DBNotEmpty(Exception):
    """Raised when the target Trac database already contains tickets."""

    def __str__(self):
        return "Will not modify database with existing tickets!"
class FlatXML(object):
    """Flat XML is XML without element attributes.  Each element may
    contain other elements or text, but not both.

    This object mirrors the XML structure into its own attributes for
    convenient access to tree elements, e.g.
    ``flat.trackers[2].groups[2].group_name``.  Uses recursion.
    """

    def __init__(self, el=None):
        """`el` is an ElementTree element, or None for an empty object."""
        # Explicit None test: truth-testing an Element is deprecated
        # (an element without children is falsy).
        if el is not None:
            self.merge(el)

    def merge(self, el):
        """Merge the supplied ElementTree element into this object."""
        for child in el:
            # list(child) instead of the deprecated Element.getchildren()
            children = list(child)
            if not children:
                # Leaf element: store non-blank text, otherwise an empty
                # list as a "no content" marker.
                if child.text is not None and child.text.strip():
                    setattr(self, child.tag, child.text)
                else:
                    setattr(self, child.tag, [])
            else:
                # Container element: recurse into each sub-element.
                setattr(self, child.tag, [FlatXML(x) for x in children])

    def __str__(self):
        buf = ""
        for name, value in self.__dict__.items():
            if not isinstance(value, list):
                buf += "%s : %s\n" % (name, value)
            else:
                for item in value:
                    buf += "\n ".join(str(item).split("\n"))
        return buf

    def __repr__(self):
        buf = ""
        for name, value in self.__dict__.items():
            if not isinstance(value, list):
                buf += "<%s>%s</%s>\n" % (name, value, name)
            else:
                for item in value:
                    buf += "\n ".join(repr(item).split("\n"))
        return buf
# --- SF data model
class Tracker(FlatXML):
"""
<trackers>
<tracker>
<url>http://sourceforge.net/?group_id=175454&atid=873299</url>
<tracker_id>873299</tracker_id>
<name>Bugs</name>
<description>Bug Tracking System</description>
<is_public>All site users</is_public>
<allow_anon>Yes</allow_anon>
<email_updates>Send to goblinhack@gmail.com</email_updates>
<due_period>2592000</due_period>
<submit_instructions></submit_instructions>
<browse_instructions></browse_instructions>
<status_timeout>1209600</status_timeout>
<due_period_initial>0</due_period_initial>
<due_period_update>0</due_period_update>
<reopen_on_comment>1</reopen_on_comment>
<canned_responses>
</canned_responses>
<groups>
<group>
<id>632324</id>
<group_name>v1.0 (example)</group_name>
</group>
</groups>
<categories>
<category>
<id>885178</id>
<category_name>Interface (example)</category_name>
<auto_assignee>nobody</auto_assignee>
</category>
</categories>
<resolutions>
<resolution>
<id>1</id>
<name>Fixed</name>
</resolution>
<resolution>
<id>2</id>
<name>Invalid</name>
</resolution>
...
</resolutions>
<statuses>
<status>
<id>1</id>
<name>Open</name>
</status>
<status>
<id>2</id>
<name>Closed</name>
</status>
<status>
<id>3</id>
<name>Deleted</name>
</status>
<status>
<id>4</id>
<name>Pending</name>
</status>
</statuses>
...
<tracker_items>
<tracker_item>
<url>http://sourceforge.net/support/tracker.php?aid=2471428</url>
<id>2471428</id>
<status_id>2</status_id>
<category_id>100</category_id>
<group_id>100</group_id>
<resolution_id>100</resolution_id>
<submitter>sbluen</submitter>
<assignee>nobody</assignee>
<closer>goblinhack</closer>
<submit_date>1230400444</submit_date>
<close_date>1231087612</close_date>
<priority>5</priority>
<summary>glitch with edge of level</summary>
<details>The mini-laser that the future soldier carries is so powerful that it even lets me go outside the level. I stand at the top edge of the level and then shoot up, and then it gets me somewhere where I am not supposed to go.</details>
<is_private>0</is_private>
<followups>
<followup>
<id>2335316</id>
<submitter>goblinhack</submitter>
<date>1175610236</date>
<details>Logged In: YES
user_id=1577972
Originator: NO
does this happen every game or just once?
you could send me the saved file and I'll try and load it - old
versions harldy ever work with newer versions - need to add some
kind of warnings on that
tx</details>
</followup>
...
</followups>
<attachments>
<attachment>
<url>http://sourceforge.net/tracker/download.php?group_id=175454&atid=873299&file_id=289080&aid=</url>
<id>289080</id>
<filename>your_most_recent_game.gz</filename>
<description>my saved game</description>
<filesize>112968</filesize>
<filetype>application/x-gzip</filetype>
<date>1218987770</date>
<submitter>sbluen</submitter>
</attachment>
...
</attachments>
<history_entries>
<history_entry>
<id>7304242</id>
<field_name>IP</field_name>
<old_value>Artifact Created: 76.173.48.148</old_value>
<date>1230400444</date>
<updator>sbluen</updator>
</history_entry>
...
</history_entries>
</tracker_item>
...
</tracker_items>
...
</tracker>
</trackers>
"""
    def __init__(self, e):
        # `e` is a <tracker> ElementTree element (see class docstring);
        # flatten it into attributes such as groups, categories,
        # resolutions, statuses and tracker_items.
        self.merge(e)
class ExportedProjectData(object):
"""Project data container as Python object.
"""
    def __init__(self, f):
        """Parse the SF XML export read from file object `f` and collect
        trackers, groups, resolutions, statuses, tickets and the subsets
        of resolutions/categories actually referenced by tickets."""
        self.trackers = [] #: tracker properties and data
        self.groups = [] #: groups []
        self.priorities = [] #: priorities used
        self.resolutions = [] #: resolutions (index, name)
        self.tickets = [] #: all tickets
        self.statuses = [] #: status (idx, name)
        self.used_resolutions = {} #: id:name
        self.used_categories = {} #: id:name
        # id '100' means no category
        self.used_categories['100'] = None
        self.users = {} #: id:name
        root = ElementTree().parse(f)
        self.users = dict([(FlatXML(u).userid, FlatXML(u).username)
                             for u in root.find('referenced_users')])
        for tracker in root.find('trackers'):
            tr = Tracker(tracker)
            self.trackers.append(tr)
            # groups-versions
            for grp in tr.groups:
                # group ids are tracker-specific even if names match
                g = (grp.id, grp.group_name)
                if g not in self.groups:
                    self.groups.append(g)
            # resolutions
            for res in tr.resolutions:
                r = (res.id, res.name)
                if r not in self.resolutions:
                    self.resolutions.append(r)
            # statuses
            # NOTE(review): statuses are overwritten per tracker; only the
            # last tracker's status list is kept -- presumably they are
            # identical across trackers, confirm.
            self.statuses = [(s.id, s.name) for s in tr.statuses]
            # tickets
            for tck in tr.tracker_items:
                if type(tck) == str: print repr(tck)
                self.tickets.append(tck)
                if int(tck.priority) not in self.priorities:
                    self.priorities.append(int(tck.priority))
                res_id = getattr(tck, "resolution_id", None)
                if res_id is not None and res_id not in self.used_resolutions:
                    # NOTE(review): this loop's result is unused -- the
                    # dict() lookup below does the actual work; presumably
                    # leftover code.
                    for idx, name in self.resolutions:
                        if idx == res_id: break
                    self.used_resolutions[res_id] = \
                        dict(self.resolutions)[res_id]
                # used categories
                categories = dict(self.get_categories(tr, noowner=True))
                if tck.category_id not in self.used_categories:
                    self.used_categories[tck.category_id] = \
                        categories[tck.category_id]
        # sorting everything
        self.trackers.sort(key=lambda x:x.name)
        self.groups.sort()
        self.priorities.sort()
    def get_categories(self, tracker=None, noid=False, noowner=False):
        """ SF categories : Trac components

            (id, name, owner) tuples for specified tracker or all trackers

            if noid or noowner flags are set, specified tuple attribute is
            stripped
        """
        trs = [tracker] if tracker is not None else self.trackers
        categories = []
        for tr in trs:
            for cat in tr.categories:
                c = (cat.id, cat.category_name, cat.auto_assignee)
                if c not in categories:
                    categories.append(c)
        #: sort by name
        # NOTE(review): with noid=True the sort key is the id string
        # (tuple order), not the name -- confirm this ordering is intended.
        if noid:
            categories.sort()
        else:
            categories.sort(key=lambda x:x[1])
        if noowner:
            categories = [x[:2] for x in categories]
        if noid:
            categories = [x[1:] for x in categories]
        return categories
class TracDatabase(object):
    """Thin wrapper around a Trac environment used as import target.

    All set*List methods refuse to run when the ticket table is
    non-empty, to avoid clobbering live data.
    """

    def __init__(self, path):
        self.env = trac.env.Environment(path)

    def hasTickets(self):
        """Return True if the ticket table is non-empty."""
        return int(self.env.db_query("SELECT count(*) FROM ticket")[0][0]) > 0

    def dbCheck(self):
        """Raise DBNotEmpty when tickets already exist."""
        if self.hasTickets():
            raise DBNotEmpty

    def setTypeList(self, s):
        """Remove all types, set them to `s`"""
        self.dbCheck()
        with self.env.db_transaction as db:
            db("DELETE FROM enum WHERE type='ticket_type'")
            for i, value in enumerate(s):
                db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
                   ("ticket_type", value, i))

    def setPriorityList(self, s):
        """Remove all priorities, set them to `s`"""
        self.dbCheck()
        with self.env.db_transaction as db:
            db("DELETE FROM enum WHERE type='priority'")
            for i, value in enumerate(s):
                db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
                   ("priority", value, i))

    def setResolutionList(self, t):
        """Remove all resolutions, set them to `t` (index, name)"""
        self.dbCheck()
        with self.env.db_transaction as db:
            db("DELETE FROM enum WHERE type='resolution'")
            for value, name in t:
                db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
                   ("resolution", name, value))

    def setComponentList(self, t):
        """Remove all components, set them to `t` (name, owner)"""
        self.dbCheck()
        with self.env.db_transaction as db:
            db("DELETE FROM component")
            for name, owner in t:
                db("INSERT INTO component (name, owner) VALUES (%s, %s)",
                   (name, owner))

    def setVersionList(self, v):
        """Remove all versions, set them to `v`"""
        self.dbCheck()
        with self.env.db_transaction as db:
            db("DELETE FROM version")
            for value in v:
                # time and description are also available
                # DB-API parameters must be a sequence; a bare string is
                # interpreted as a sequence of characters, so wrap it.
                db("INSERT INTO version (name) VALUES (%s)", (value,))

    def setMilestoneList(self, m):
        """Remove all milestones, set them to `m`"""
        self.dbCheck()
        with self.env.db_transaction as db:
            db("DELETE FROM milestone")
            for value in m:
                # due, completed, description are also available
                db("INSERT INTO milestone (name) VALUES (%s)", (value,))

    def addTicket(self, type, time, changetime, component,
                  priority, owner, reporter, cc,
                  version, milestone, status, resolution,
                  summary, description, keywords):
        """Insert one ticket row and return its new id.

        ticket table db21.py format
          id              integer PRIMARY KEY,
          type            text,        -- the nature of the ticket
          time            integer,     -- the time it was created
          changetime      integer,
          component       text,
          severity        text,
          priority        text,
          owner           text,        -- who is this ticket assigned to
          reporter        text,
          cc              text,        -- email addresses to notify
          version         text,        --
          milestone       text,        --
          status          text,
          resolution      text,
          summary         text,        -- one-line summary
          description     text,        -- problem description (long)
          keywords        text
        """
        # An 'open' SF ticket maps to 'assigned' when it has an owner,
        # otherwise to 'new'.
        if status.lower() == 'open':
            if owner != '':
                status = 'assigned'
            else:
                status = 'new'
        with self.env.db_transaction as db:
            c = db.cursor()
            c.execute("""
                INSERT INTO ticket (type, time, changetime, component,
                                    priority, owner, reporter, cc, version,
                                    milestone, status, resolution, summary,
                                    description, keywords)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
                        %s, %s)
                """, (type, time, changetime, component, priority, owner,
                      reporter, cc, version, milestone, status.lower(),
                      resolution, summary, '%s' % description, keywords))
            return db.get_last_id(c, 'ticket')

    def addTicketComment(self, ticket, time, author, value):
        """Record a comment on `ticket` as a ticket_change row."""
        with self.env.db_transaction as db:
            db("""
                INSERT INTO ticket_change (ticket, time, author, field,
                                           oldvalue, newvalue)
                VALUES (%s, %s, %s, %s, %s, %s)
                """, (ticket, time, author, 'comment', '', '%s' % value))

    def addTicketChange(self, ticket, time, author, field, oldvalue, newvalue):
        """Record a field change on `ticket` in the change history."""
        with self.env.db_transaction as db:
            db("""INSERT INTO ticket_change (ticket, time, author, field,
                                             oldvalue, newvalue)
                  VALUES (%s, %s, %s, %s, %s, %s)
                  """, (ticket, time, author, field, oldvalue, newvalue))
def importData(f, env, opt):
project = ExportedProjectData(f)
trackers = project.trackers
trac = TracDatabase(env)
# Data conversion
typeList = [x.name for x in trackers]
print("%d trackers will be converted to the following ticket types:\n %s" \
% (len(trackers), typeList))
used_cat_names = set(project.used_categories.values())
#: make names unique, forget about competing owners (the last one wins)
components = dict(project.get_categories(noid=True)).items()
components.sort()
components = [x for x in components if x[0] in used_cat_names]
print "%d out of %d categories are used and will be converted to the following components:\n %s" \
% (len(components), len(project.get_categories()), components)
print "..renaming component owners:"
for i,c in enumerate(components):
if c[1] in user_map:
components[i] = (c[0], user_map[c[1]])
print " %s" % components
print "%d groups which will be converted to the following versions:\n %s" \
% (len(project.groups), project.groups)
print "%d resolutions found :\n %s" \
% (len(project.resolutions), project.resolutions)
resolutions = [(k,project.used_resolutions[k])
for k in project.used_resolutions]
resolutions.sort(key=lambda x:int(x[0]))
print ".. only %d used will be imported:\n %s" \
% (len(resolutions), resolutions)
print "Priorities used so far: %s" % project.priorities
if not(raw_input("Continue [y/N]?").lower() == 'y'):
sys.exit()
# Data save
trac.setTypeList(typeList)
trac.setComponentList(components)
trac.setPriorityList(range(min(project.priorities),
max(project.priorities)))
trac.setVersionList(set([x[1] for x in project.groups]))
trac.setResolutionList(resolutions)
trac.setMilestoneList([])
for tracker in project.trackers:
# id 100 means no component selected
component_lookup = dict(project.get_categories(noowner=True) +
[("100", None)])
for t in tracker.tracker_items:
i = trac.addTicket(type=tracker.name,
time=int(t.submit_date),
changetime=int(t.submit_date),
component=component_lookup[t.category_id],
priority=t.priority,
owner=t.assignee \
if t.assignee not in user_map \
else user_map[t.assignee],
reporter=t.submitter \
if t.submitter not in user_map \
else user_map[t.submitter],
cc=None,
# 100 means no group selected
version=dict(project.groups +
[("100", None)])[t.group_id],
milestone=None,
status=dict(project.statuses)[t.status_id],
resolution=dict(resolutions)[t.resolution_id] \
if hasattr(t, "resolution_id") else None,
summary=t.summary,
description=t.details,
keywords='sf' + t.id)
print 'Imported %s as #%d' % (t.id, i)
if len(t.attachments):
attmsg = "SourceForge attachments:\n"
for a in t.attachments:
attmsg = attmsg + " * [%s %s] (%s) - added by '%s' %s [[BR]] "\
% (a.url+t.id, a.filename, a.filesize+" bytes",
user_map.get(a.submitter, a.submitter),
time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(int(a.date))))
attmsg = attmsg + "''%s ''\n" % (a.description or '')
# empty description is as empty list
trac.addTicketComment(ticket=i,
time=time.strftime("%Y-%m-%d %H:%M:%S",
time.localtime(int(t.submit_date))),
author=None, value=attmsg)
print ' added information about %d attachments for #%d' % \
(len(t.attachments), i)
for msg in t.followups:
"""
<followup>
<id>3280792</id>
<submitter>goblinhack</submitter>
<date>1231087739</date>
<details>done</details>
</followup>
"""
trac.addTicketComment(ticket=i,
time=msg.date,
author=msg.submitter,
value=msg.details)
if t.followups:
print ' imported %d messages for #%d' % (len(t.followups), i)
# Import history
"""
<history_entry>
<id>4452195</id>
<field_name>resolution_id</field_name>
<old_value>100</old_value>
<date>1176043865</date>
<updator>goblinhack</updator>
</history_entry>
"""
revision = t.__dict__.copy()
# iterate the history in reverse order and update ticket revision from
# current (last) to initial
changes = 0
for h in sorted(t.history_entries, reverse=True):
"""
Processed fields (field - notes):
IP - no target field, just skip
summary
priority
close_date
assigned_to
Fields not processed (field: explanation):
File Added - TODO
resolution_id - need to update used_resolutions
status_id
artifact_group_id
category_id
group_id
"""
f = None
if h.field_name in ("IP",):
changes += 1
continue
elif h.field_name in ("summary", "priority"):
f = h.field_name
oldvalue = h.old_value
newvalue = revision.get(h.field_name, None)
elif h.field_name == 'assigned_to':
f = "owner"
newvalue = revision['assignee']
if h.old_value == '100': # was not assigned
revision['assignee'] = None
oldvalue = None
else:
username = project.users[h.old_value]
if username in user_map: username = user_map[username]
revision['assignee'] = oldvalue = username
elif h.field_name == 'close_date' and revision['close_date'] != 0:
f = 'status'
oldvalue = 'assigned'
newvalue = 'closed'
if f:
changes += 1
trac.addTicketChange(ticket=i,
time=h.date,
author=h.updator,
field=f,
oldvalue=oldvalue,
newvalue=newvalue)
if h.field_name != 'assigned_to':
revision[h.field_name] = h.old_value
if changes:
print ' processed %d out of %d history items for #%d' % \
(changes, len(t.history_entries), i)
def main():
    """Command line entry point: parse arguments and run the import.

    Usage: sourceforge2trac.py xml_export.xml /path/to/trac/environment
    Exits with status 1 when the target database already has tickets.
    """
    import optparse
    p = optparse.OptionParser(
        "Usage: %prog xml_export.xml /path/to/trac/environment")
    opt, args = p.parse_args()
    if len(args) != 2:
        p.error("Incorrect number of arguments")
    try:
        importData(open(args[0]), args[1], opt)
    except DBNotEmpty, e:
        print 'Error:', e
        sys.exit(1)
    print complete_msg
print complete_msg
if __name__ == '__main__':
    # Run the importer when executed as a script.
    main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/emailfilter.py | trac/contrib/emailfilter.py | #!/usr/bin/python
"""
emailfilter.py -- Email tickets to Trac.
A simple MTA filter to create Trac tickets from inbound emails.
Copyright 2005, Daniel Lundin <daniel@edgewall.com>
Copyright 2005, Edgewall Software
Please note:
This is only a starting point. See
* http://trac.edgewall.org/ticket/5327 and
* http://trac-hacks.org/intertrac/EmailtoTracScript
for more complete and advanced examples.
The scripts reads emails from stdin and inserts directly into a Trac database.
MIME headers are mapped as follows:
* From: => Reporter
* Subject: => Summary
* Body => Description
How to use
----------
* Set TRAC_ENV_PATH to the path of your project's Trac environment
* Configure script as a mail (pipe) filter with your MTA
typically, this involves adding a line like this to /etc/aliases:
somename: |/path/to/email2trac.py
Check your MTA's documentation for specifics.
Todo
----
* Configure target database through env variable?
* Handle/discard HTML parts
* Attachment support
"""
TRAC_ENV_PATH = '/var/trac/test'
import email
import sys
from trac.env import Environment
from trac.ticket import Ticket
class TicketEmailParser(object):
    """Turn an RFC 2822 message into a new Trac ticket.

    Mapping: From: -> reporter, Subject: -> summary, and the last
    text/plain body part -> description.
    """

    # Target trac.env.Environment, set by __init__.
    env = None

    def __init__(self, env):
        self.env = env

    def parse(self, fp):
        """Read one email message from file object `fp` and insert a
        ticket for it -- but only when a plain-text body was found."""
        message = email.message_from_file(fp)
        ticket = Ticket(self.env)
        ticket['status'] = 'new'
        ticket['reporter'] = message['from']
        ticket['summary'] = message['subject']
        # The last text/plain part encountered wins as the description.
        for part in message.walk():
            if part.get_content_type() != 'text/plain':
                continue
            ticket['description'] = part.get_payload(decode=1).strip()
        if ticket.values.get('description'):
            ticket.insert()
if __name__ == '__main__':
    # Read a single message from stdin and file it as a ticket in the
    # environment configured at the top of this script.
    env = Environment(TRAC_ENV_PATH, create=0)
    tktparser = TicketEmailParser(env)
    tktparser.parse(sys.stdin)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/checkwiki.py | trac/contrib/checkwiki.py | #!/usr/bin/python
#
# Check/update default wiki pages from the Trac project website.
#
# Note: This is a development tool used in Trac packaging/QA, not something
# particularly useful for end-users.
#
# Author: Daniel Lundin <daniel@edgewall.com>
import httplib
import re
import sys
import getopt
# Pages to include in distribution
wiki_pages = [
"CamelCase",
"InterMapTxt",
"InterTrac",
"InterWiki",
"PageTemplates",
"RecentChanges",
"TitleIndex",
"TracAccessibility",
"TracAdmin",
"TracBackup",
"TracBatchModify",
"TracBrowser",
"TracCgi",
"TracChangeset",
"TracEnvironment",
"TracFastCgi",
"TracFineGrainedPermissions",
"TracGuide",
"TracImport",
"TracIni",
"TracInstall",
"TracInterfaceCustomization",
"TracLinks",
"TracLogging",
"TracModPython",
"TracModWSGI",
"TracNavigation",
"TracNotification",
"TracPermissions",
"TracPlugins",
"TracQuery",
"TracReports",
"TracRepositoryAdmin",
"TracRevisionLog",
"TracRoadmap",
"TracRss",
"TracSearch",
"TracStandalone",
"TracSupport",
"TracSyntaxColoring",
"TracTickets",
"TracTicketsCustomFields",
"TracTimeline",
"TracUnicode",
"TracUpgrade",
"TracWiki",
"TracWorkflow",
"WikiDeletePage",
"WikiFormatting",
"WikiHtml",
"WikiMacros",
"WikiNewPage",
"WikiPageNames",
"WikiProcessors",
"WikiRestructuredText",
"WikiRestructuredTextLinks"
]
def get_page_from_file(prefix, pname):
    """Return the contents of the local file `pname`, or '' when the
    file cannot be read.

    `prefix` is unused here; it is accepted so this function is
    call-compatible with get_page_from_web().
    """
    try:
        # `with` guarantees the handle is closed even if read() fails;
        # the original leaked it on a read error.  Catch only I/O errors
        # instead of every Exception.
        with open(pname, 'r') as f:
            return f.read()
    except IOError:
        print("Missing page: %s" % pname)
        return ''
def get_page_from_web(prefix, pname):
    # Fetch wiki page `pname` (under `prefix`) in plain-text format from
    # the main Trac project site; on success cache it to a local file of
    # the same name.  Returns the page text ('' or error body on failure).
    host = "trac.edgewall.org"
    rfile = "/wiki/%s%s?format=txt" % (prefix, pname)
    c = httplib.HTTPConnection(host)
    c.request("GET", rfile)
    print "Getting", rfile
    r = c.getresponse()
    d = r.read()
    if r.status == 200 and d:
        f = open(pname, 'w+')
        # Normalize CRLF to LF before writing the local copy.
        f.write(d.replace('\r\n', '\n'))
        f.close()
    else:
        print "Missing or empty page"
    c.close()
    return d
def check_links(data):
    """Report CamelCase wiki links that point to pages missing from `data`.

    `data` maps page name -> page text.  Prints one "Broken link" line
    per (page, missing link) pair; links are reported in sorted order.

    The original nested helper used a mutable default argument
    (`refs=[]`) and recursion; this version uses re.findall and a set.
    """
    camelcase_re = re.compile(
        r"(?:^|(?<=[^A-Za-z]))!?[A-Z][a-z/]+(?:[A-Z][a-z/]+)+")
    pages = set(data)
    for page in data.keys():
        # Deduplicate and sort the links found on this page, matching the
        # original sort-and-uniq behaviour.
        for link in sorted(set(camelcase_re.findall(data[page]))):
            if link not in pages:
                print("Broken link: %s -> %s" % (page, link))
if __name__ == '__main__':
    # Command line: [-d] [-C] [-p prefix] [PAGE ...]
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dCp:")
    except getopt.GetoptError:
        # print help information and exit:
        print "%s [-d] [-C] [-p prefix] [PAGE ...]" % sys.argv[0]
        print "\t-d -- Download pages from the main project wiki."
        print "\t-C -- Try to check links (currently broken)"
        print "\t-p prefix -- When downloading, prepend 'prefix/' to the page."
        sys.exit()
    # Defaults: read pages from local files, no prefix, no link checking.
    get_page = get_page_from_file
    prefix = ''
    check = False
    for o,a in opts:
        if o == '-d':
            get_page = get_page_from_web
        elif o == '-p':
            prefix = a+'/'
        elif o == '-C':
            check = True
    # Collect the requested pages (or the full default list), then
    # optionally cross-check their wiki links.
    data = {}
    for p in args or wiki_pages:
        data[p] = get_page(prefix, p)
    if check:
        check_links(data)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/l10n_revert_lineno_conflicts.py | trac/contrib/l10n_revert_lineno_conflicts.py | # Copyright (C) 2013 Edgewall Software
# This file is distributed under the same license as the Trac project.
"""
L10N tool which takes a list of .po in conflicted state and revert
ignorable changes.
It resolves the conflicts for which "theirs" changes consist solely of
line number changes, by reverting to the working copy content.
This makes it easier to merge translation .po files across branches.
"""
import re
# Matches a merge conflict whose "theirs" side consists only of comment
# lines (i.e. gettext line-number references).  Whitespace in the pattern
# is insignificant under re.VERBOSE.
ignore_lineno_re = re.compile(r'''
    <<<< .* \n
    ( (?: [^=] .* \n )+)    # \1 == "working copy"
    ==== .* \n
    ( (?: \# .* \n )+)      # \2 == comment only for "theirs"
    >>>> .* \n
    ''', re.MULTILINE | re.VERBOSE)

def sanitize_file(path):
    """Revert the ignorable conflicts in the .po file at `path`, in place.

    A conflict is ignorable when "theirs" consists solely of comment
    lines (line-number changes); it is replaced by the working copy
    content.  Progress is reported on stdout.
    """
    # open() instead of the Python-2-only file() builtin; text mode keeps
    # the str regex applicable to the data read.
    with open(path, 'r+') as f:
        sanitized, nsub = ignore_lineno_re.subn(r'\1', f.read())
        if nsub:
            print("reverted %d ignorable changes in %s" % (nsub, path))
            f.seek(0)
            f.write(sanitized)
            f.truncate()
        else:
            print("no ignorable changes in %s" % (path,))
if __name__ == '__main__':
    import sys
    # Each command line argument is a .po file to sanitize in place.
    for path in sys.argv[1:]:
        sanitize_file(path)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/bugzilla2trac.py | trac/contrib/bugzilla2trac.py | #!/usr/bin/env python
"""
Import a Bugzilla items into a Trac database.
Requires: Trac 0.9b1 from http://trac.edgewall.org/
Python 2.3 from http://www.python.org/
MySQL >= 3.23 from http://www.mysql.org/
or PostGreSQL 8.4 from http://www.postgresql.org/
or SQLite 3 from http://www.sqlite.org/
Thanks: Mark Rowe <mrowe@bluewire.net.nz>
for original TracDatabase class
Copyright 2004, Dmitry Yusupov <dmitry_yus@yahoo.com>
Many enhancements, Bill Soudan <bill@soudan.net>
Other enhancements, Florent Guillaume <fg@nuxeo.com>
Reworked, Jeroen Ruigrok van der Werven <asmodai@in-nomine.org>
Jeff Moreland <hou5e@hotmail.com>
$Id: bugzilla2trac.py 11490 2013-01-13 15:18:06Z rblank $
"""
from __future__ import with_statement
import re
###
### Conversion Settings -- edit these before running if desired
###
# Bugzilla version. You can find this in Bugzilla's globals.pl file.
#
# Currently, the following bugzilla versions are known to work:
# 2.11 (2110), 2.16.5 (2165), 2.16.7 (2167), 2.18.3 (2183), 2.19.1 (2191),
# 2.23.3 (2233), 3.04.4 (3044)
#
# If you run this script on a version not listed here and it is successful,
# please file a ticket at http://trac.edgewall.org
#
BZ_VERSION = 3044
# MySQL connection parameters for the Bugzilla database. These can also
# be specified on the command line.
BZ_DB = ""
BZ_HOST = ""
BZ_USER = ""
BZ_PASSWORD = ""
# Path to the Trac environment.
TRAC_ENV = "/usr/local/trac"
# If true, all existing Trac tickets and attachments will be removed
# prior to import.
TRAC_CLEAN = True
# Enclose imported ticket description and comments in a {{{ }}}
# preformat block? This formats the text in a fixed-point font.
PREFORMAT_COMMENTS = False
# Replace bug numbers in comments with #xyz
REPLACE_BUG_NO = False
# Severities
SEVERITIES = [
("blocker", "1"),
("critical", "2"),
("major", "3"),
("normal", "4"),
("minor", "5"),
("trivial", "6")
]
# Priorities
# If using the default Bugzilla priorities of P1 - P5, do not change anything
# here.
# If you have other priorities defined please change the P1 - P5 mapping to
# the order you want. You can also collapse multiple priorities on bugzilla's
# side into the same priority on Trac's side, simply adjust PRIORITIES_MAP.
PRIORITIES = [
("highest", "1"),
("high", "2"),
("normal", "3"),
("low", "4"),
("lowest", "5")
]
# Bugzilla: Trac
# NOTE: Use lowercase.
PRIORITIES_MAP = {
"p1": "highest",
"p2": "high",
"p3": "normal",
"p4": "low",
"p5": "lowest"
}
# By default, all bugs are imported from Bugzilla. If you add a list
# of products here, only bugs from those products will be imported.
PRODUCTS = []
# These Bugzilla products will be ignored during import.
IGNORE_PRODUCTS = []
# These milestones are ignored
IGNORE_MILESTONES = ["---"]
# Don't import user names and passwords into htpassword if
# user is disabled in bugzilla? (i.e. profiles.DisabledText<>'')
IGNORE_DISABLED_USERS = True
# These logins are converted to these user ids
LOGIN_MAP = {
#'some.user@example.com': 'someuser',
}
# These emails are removed from CC list
IGNORE_CC = [
#'loser@example.com',
]
# The 'component' field in Trac can come either from the Product or
# or from the Component field of Bugzilla. COMPONENTS_FROM_PRODUCTS
# switches the behavior.
# If COMPONENTS_FROM_PRODUCTS is True:
# - Bugzilla Product -> Trac Component
# - Bugzilla Component -> Trac Keyword
# IF COMPONENTS_FROM_PRODUCTS is False:
# - Bugzilla Product -> Trac Keyword
# - Bugzilla Component -> Trac Component
COMPONENTS_FROM_PRODUCTS = False
# If COMPONENTS_FROM_PRODUCTS is True, the default owner for each
# Trac component is inferred from a default Bugzilla component.
DEFAULT_COMPONENTS = ["default", "misc", "main"]
# This mapping can assign keywords in the ticket entry to represent
# products or components (depending on COMPONENTS_FROM_PRODUCTS).
# The keyword will be ignored if empty.
KEYWORDS_MAPPING = {
#'Bugzilla_product_or_component': 'Keyword',
"default": "",
"misc": "",
}
# If this is True, products or components are all set as keywords
# even if not mentioned in KEYWORDS_MAPPING.
MAP_ALL_KEYWORDS = True
# Custom field mappings
CUSTOMFIELD_MAP = {
#'Bugzilla_field_name': 'Trac_customfield_name',
#'op_sys': 'os',
#'cf_featurewantedby': 'wanted_by',
#'product': 'product'
}
# Bug comments that should not be imported. Each entry in list should
# be a regular expression.
IGNORE_COMMENTS = [
"^Created an attachment \(id="
]
###########################################################################
### You probably don't need to change any configuration past this line. ###
###########################################################################
# Bugzilla status to Trac status translation map.
#
# NOTE: bug activity is translated as well, which may cause bug
# activity to be deleted (e.g. resolved -> closed in Bugzilla
# would translate into closed -> closed in Trac, so we just ignore the
# change).
#
# There is some special magic for open in the code: if there is no
# Bugzilla owner, open is mapped to 'new' instead.
STATUS_TRANSLATE = {
"unconfirmed": "new",
"open": "assigned",
"resolved": "closed",
"verified": "closed",
"released": "closed"
}
# Translate Bugzilla statuses into Trac keywords. This provides a way
# to retain the Bugzilla statuses in Trac. e.g. when a bug is marked
# 'verified' in Bugzilla it will be assigned a VERIFIED keyword.
STATUS_KEYWORDS = {
"verified": "VERIFIED",
"released": "RELEASED"
}
# Some fields in Bugzilla do not have equivalents in Trac. Changes in
# fields listed here will not be imported into the ticket change history,
# otherwise you'd see changes for fields that don't exist in Trac.
IGNORED_ACTIVITY_FIELDS = ["everconfirmed"]
# Regular expression and its replacement
# this expression will update references to bugs 1 - 99999 that
# have the form "bug 1" or "bug #1"
BUG_NO_RE = re.compile(r"\b(bug #?)([0-9]{1,5})\b", re.I)
BUG_NO_REPL = r"#\2"
###
### Script begins here
###
import os
import sys
import string
import StringIO
import MySQLdb
import MySQLdb.cursors
from trac.attachment import Attachment
from trac.env import Environment
if not hasattr(sys, 'setdefaultencoding'):
reload(sys)
sys.setdefaultencoding('latin1')
# simulated Attachment class for trac.add
#class Attachment:
# def __init__(self, name, data):
# self.filename = name
# self.file = StringIO.StringIO(data.tostring())
# simple field translation mapping. if string not in
# mapping, just return string, otherwise return value
class FieldTranslator(dict):
    """Mapping that translates field values, falling back to the key.

    Looking up a value without an explicit mapping returns the value
    unchanged, so the translator can be applied unconditionally.
    """

    def __getitem__(self, item):
        # dict.get() with the key itself as the default replaces the
        # Python 2-only dict.has_key() check (removed in Python 3) and
        # needs only a single lookup.
        return dict.get(self, item, item)
# Translator applied to every status coming from Bugzilla; statuses with
# no entry in STATUS_TRANSLATE pass through unchanged.
statusXlator = FieldTranslator(STATUS_TRANSLATE)
class TracDatabase(object):
def __init__(self, path):
self.env = Environment(path)
self.loginNameCache = {}
self.fieldNameCache = {}
from trac.db.api import DatabaseManager
self.using_postgres = \
DatabaseManager(self.env).connection_uri.startswith("postgres:")
def hasTickets(self):
return int(self.env.db_query("SELECT count(*) FROM ticket")[0][0] > 0)
def assertNoTickets(self):
if self.hasTickets():
raise Exception("Will not modify database with existing tickets!")
def setSeverityList(self, s):
"""Remove all severities, set them to `s`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM enum WHERE type='severity'")
for value, i in s:
print " inserting severity '%s' - '%s'" % (value, i)
db("""INSERT INTO enum (type, name, value)
VALUES (%s, %s, %s)""",
("severity", value, i))
def setPriorityList(self, s):
"""Remove all priorities, set them to `s`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM enum WHERE type='priority'")
for value, i in s:
print " inserting priority '%s' - '%s'" % (value, i)
db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
("priority", value, i))
def setComponentList(self, l, key):
"""Remove all components, set them to `l`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM component")
for comp in l:
print " inserting component '%s', owner '%s'" % \
(comp[key], comp['owner'])
db("INSERT INTO component (name, owner) VALUES (%s, %s)",
(comp[key], comp['owner']))
def setVersionList(self, v, key):
"""Remove all versions, set them to `v`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM version")
for vers in v:
print " inserting version '%s'" % (vers[key])
db("INSERT INTO version (name) VALUES (%s)",
(vers[key],))
def setMilestoneList(self, m, key):
"""Remove all milestones, set them to `m`"""
self.assertNoTickets()
with self.env.db_transaction as db:
db("DELETE FROM milestone")
for ms in m:
milestone = ms[key]
print " inserting milestone '%s'" % (milestone)
db("INSERT INTO milestone (name) VALUES (%s)",
(milestone,))
def addTicket(self, id, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords, customfields):
desc = description
type = "defect"
if SEVERITIES:
if severity.lower() == "enhancement":
severity = "minor"
type = "enhancement"
else:
if priority.lower() == "enhancement":
priority = "minor"
type = "enhancement"
if PREFORMAT_COMMENTS:
desc = '{{{\n%s\n}}}' % desc
if REPLACE_BUG_NO:
if BUG_NO_RE.search(desc):
desc = re.sub(BUG_NO_RE, BUG_NO_REPL, desc)
if PRIORITIES_MAP.has_key(priority):
priority = PRIORITIES_MAP[priority]
print " inserting ticket %s -- %s" % (id, summary)
with self.env.db_transaction as db:
db("""INSERT INTO ticket (id, type, time, changetime, component,
severity, priority, owner, reporter, cc,
version, milestone, status, resolution,
summary, description, keywords)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s,
%s, %s, %s, %s)
""", (id, type, datetime2epoch(time),
datetime2epoch(changetime), component, severity,
priority, owner, reporter, cc, version, milestone,
status.lower(), resolution, summary, desc, keywords))
if self.using_postgres:
with self.env.db_transaction as db:
c = db.cursor()
c.execute("""
SELECT SETVAL('ticket_id_seq', MAX(id)) FROM ticket;
SELECT SETVAL('report_id_seq', MAX(id)) FROM report""")
ticket_id = db.get_last_id(c, 'ticket')
# add all custom fields to ticket
for name, value in customfields.iteritems():
self.addTicketCustomField(ticket_id, name, value)
return ticket_id
def addTicketCustomField(self, ticket_id, field_name, field_value):
if field_value == None:
return
self.env.db_transaction("""
INSERT INTO ticket_custom (ticket, name, value) VALUES (%s, %s, %s)
""", (ticket_id, field_name, field_value))
def addTicketComment(self, ticket, time, author, value):
comment = value
if PREFORMAT_COMMENTS:
comment = '{{{\n%s\n}}}' % comment
if REPLACE_BUG_NO:
if BUG_NO_RE.search(comment):
comment = re.sub(BUG_NO_RE, BUG_NO_REPL, comment)
with self.env.db_transaction as db:
db("""INSERT INTO ticket_change (ticket, time, author, field,
oldvalue, newvalue)
VALUES (%s, %s, %s, %s, %s, %s)
""", (ticket, datetime2epoch(time), author, 'comment', '',
comment))
def addTicketChange(self, ticket, time, author, field, oldvalue, newvalue):
if field == "owner":
if LOGIN_MAP.has_key(oldvalue):
oldvalue = LOGIN_MAP[oldvalue]
if LOGIN_MAP.has_key(newvalue):
newvalue = LOGIN_MAP[newvalue]
if field == "priority":
if PRIORITIES_MAP.has_key(oldvalue.lower()):
oldvalue = PRIORITIES_MAP[oldvalue.lower()]
if PRIORITIES_MAP.has_key(newvalue.lower()):
newvalue = PRIORITIES_MAP[newvalue.lower()]
# Doesn't make sense if we go from highest -> highest, for example.
if oldvalue == newvalue:
return
with self.env.db_transaction as db:
db("""INSERT INTO ticket_change (ticket, time, author, field,
oldvalue, newvalue)
VALUES (%s, %s, %s, %s, %s, %s)
""", (ticket, datetime2epoch(time), author, field,
oldvalue, newvalue))
def addAttachment(self, author, a):
if a['filename'] != '':
description = a['description']
id = a['bug_id']
filename = a['filename']
filedata = StringIO.StringIO(a['thedata'])
filesize = len(filedata.getvalue())
time = a['creation_ts']
print " ->inserting attachment '%s' for ticket %s -- %s" % \
(filename, id, description)
attachment = Attachment(self.env, 'ticket', id)
attachment.author = author
attachment.description = description
attachment.insert(filename, filedata, filesize,
datetime2epoch(time))
del attachment
def getLoginName(self, cursor, userid):
if userid not in self.loginNameCache:
cursor.execute("SELECT * FROM profiles WHERE userid = %s", (userid))
loginName = cursor.fetchall()
if loginName:
loginName = loginName[0]['login_name']
else:
print """WARNING: unknown bugzilla userid %d, recording as
anonymous""" % (userid)
loginName = "anonymous"
loginName = LOGIN_MAP.get(loginName, loginName)
self.loginNameCache[userid] = loginName
return self.loginNameCache[userid]
def getFieldName(self, cursor, fieldid):
if fieldid not in self.fieldNameCache:
# fielddefs.fieldid got changed to fielddefs.id in Bugzilla
# 2.23.3.
if BZ_VERSION >= 2233:
cursor.execute("SELECT * FROM fielddefs WHERE id = %s",
(fieldid))
else:
cursor.execute("SELECT * FROM fielddefs WHERE fieldid = %s",
(fieldid))
fieldName = cursor.fetchall()
if fieldName:
fieldName = fieldName[0]['name'].lower()
else:
print "WARNING: unknown bugzilla fieldid %d, \
recording as unknown" % (userid)
fieldName = "unknown"
self.fieldNameCache[fieldid] = fieldName
return self.fieldNameCache[fieldid]
def makeWhereClause(fieldName, values, negative=False):
    """Build a parenthesized SQL condition over `values` for `fieldName`.

    Returns '' when `values` is empty.  With `negative` set, the terms
    are `fieldName != 'value'` joined by AND; otherwise `fieldName =
    'value'` joined by OR.  The result starts with a space so it can be
    appended directly after WHERE.

    NOTE(review): the values are interpolated into the SQL unescaped;
    they come from this script's own configuration, not user input.
    """
    if not values:
        return ''
    connector = ' AND ' if negative else ' OR '
    op = '!=' if negative else '='
    terms = ("%s %s '%s'" % (fieldName, op, v) for v in values)
    return ' (%s)' % connector.join(terms)
def convert(_db, _host, _user, _password, _env, _force):
activityFields = FieldTranslator()
# account for older versions of bugzilla
print "Using Bugzilla v%s schema." % BZ_VERSION
if BZ_VERSION == 2110:
activityFields['removed'] = "oldvalue"
activityFields['added'] = "newvalue"
# init Bugzilla environment
print "Bugzilla MySQL('%s':'%s':'%s':'%s'): connecting..." % \
(_db, _host, _user, ("*" * len(_password)))
mysql_con = MySQLdb.connect(host=_host,
user=_user, passwd=_password, db=_db, compress=1,
cursorclass=MySQLdb.cursors.DictCursor,
charset='utf8')
mysql_cur = mysql_con.cursor()
# init Trac environment
print "Trac SQLite('%s'): connecting..." % (_env)
trac = TracDatabase(_env)
# force mode...
if _force == 1:
print "\nCleaning all tickets..."
with trac.env.db_transaction as db:
db("DELETE FROM ticket_change")
db("DELETE FROM ticket")
db("DELETE FROM ticket_custom")
db("DELETE FROM attachment")
attachments_dir = os.path.join(os.path.normpath(trac.env.path),
"attachments")
# Straight from the Python documentation.
for root, dirs, files in os.walk(attachments_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
if not os.stat(attachments_dir):
os.mkdir(attachments_dir)
print "All tickets cleaned..."
print "\n0. Filtering products..."
if BZ_VERSION >= 2180:
mysql_cur.execute("SELECT name FROM products")
else:
mysql_cur.execute("SELECT product AS name FROM products")
products = []
for line in mysql_cur.fetchall():
product = line['name']
if PRODUCTS and product not in PRODUCTS:
continue
if product in IGNORE_PRODUCTS:
continue
products.append(product)
PRODUCTS[:] = products
print " Using products", " ".join(PRODUCTS)
print "\n1. Import severities..."
trac.setSeverityList(SEVERITIES)
print "\n2. Import components..."
if not COMPONENTS_FROM_PRODUCTS:
if BZ_VERSION >= 2180:
sql = """SELECT DISTINCT c.name AS name, c.initialowner AS owner
FROM components AS c, products AS p
WHERE c.product_id = p.id AND"""
sql += makeWhereClause('p.name', PRODUCTS)
else:
sql = "SELECT value AS name, initialowner AS owner FROM components"
sql += " WHERE" + makeWhereClause('program', PRODUCTS)
mysql_cur.execute(sql)
components = mysql_cur.fetchall()
for component in components:
component['owner'] = trac.getLoginName(mysql_cur,
component['owner'])
trac.setComponentList(components, 'name')
else:
if BZ_VERSION >= 2180:
sql = ("SELECT p.name AS product, c.name AS comp, "
" c.initialowner AS owner "
"FROM components c, products p "
"WHERE c.product_id = p.id AND" +
makeWhereClause('p.name', PRODUCTS))
else:
sql = ("SELECT program AS product, value AS comp, "
" initialowner AS owner "
"FROM components WHERE" +
makeWhereClause('program', PRODUCTS))
mysql_cur.execute(sql)
lines = mysql_cur.fetchall()
all_components = {} # product -> components
all_owners = {} # product, component -> owner
for line in lines:
product = line['product']
comp = line['comp']
owner = line['owner']
all_components.setdefault(product, []).append(comp)
all_owners[(product, comp)] = owner
component_list = []
for product, components in all_components.items():
# find best default owner
default = None
for comp in DEFAULT_COMPONENTS:
if comp in components:
default = comp
break
if default is None:
default = components[0]
owner = all_owners[(product, default)]
owner_name = trac.getLoginName(mysql_cur, owner)
component_list.append({'product': product, 'owner': owner_name})
trac.setComponentList(component_list, 'product')
print "\n3. Import priorities..."
trac.setPriorityList(PRIORITIES)
print "\n4. Import versions..."
if BZ_VERSION >= 2180:
sql = """SELECT DISTINCTROW v.value AS value
FROM products p, versions v"""
sql += " WHERE v.product_id = p.id AND"
sql += makeWhereClause('p.name', PRODUCTS)
else:
sql = "SELECT DISTINCTROW value FROM versions"
sql += " WHERE" + makeWhereClause('program', PRODUCTS)
mysql_cur.execute(sql)
versions = mysql_cur.fetchall()
trac.setVersionList(versions, 'value')
print "\n5. Import milestones..."
sql = "SELECT DISTINCT value FROM milestones"
sql += " WHERE" + makeWhereClause('value', IGNORE_MILESTONES, negative=True)
mysql_cur.execute(sql)
milestones = mysql_cur.fetchall()
trac.setMilestoneList(milestones, 'value')
print "\n6. Retrieving bugs..."
if BZ_VERSION >= 2180:
sql = """SELECT DISTINCT b.*, c.name AS component, p.name AS product
FROM bugs AS b, components AS c, products AS p """
sql += " WHERE" + makeWhereClause('p.name', PRODUCTS)
sql += " AND b.product_id = p.id"
sql += " AND b.component_id = c.id"
sql += " ORDER BY b.bug_id"
else:
sql = """SELECT DISTINCT b.*, c.value AS component, p.product AS product
FROM bugs AS b, components AS c, products AS p """
sql += " WHERE" + makeWhereClause('p.product', PRODUCTS)
sql += " AND b.product = p.product"
sql += " AND b.component = c.value"
sql += " ORDER BY b.bug_id"
mysql_cur.execute(sql)
bugs = mysql_cur.fetchall()
print "\n7. Import bugs and bug activity..."
for bug in bugs:
bugid = bug['bug_id']
ticket = {}
keywords = []
ticket['id'] = bugid
ticket['time'] = bug['creation_ts']
ticket['changetime'] = bug['delta_ts']
if COMPONENTS_FROM_PRODUCTS:
ticket['component'] = bug['product']
else:
ticket['component'] = bug['component']
if SEVERITIES:
ticket['severity'] = bug['bug_severity']
ticket['priority'] = bug['priority'].lower()
else:
# use bugzilla severities as trac priorities, and ignore bugzilla
# priorities
ticket['severity'] = ''
ticket['priority'] = bug['bug_severity']
ticket['owner'] = trac.getLoginName(mysql_cur, bug['assigned_to'])
ticket['reporter'] = trac.getLoginName(mysql_cur, bug['reporter'])
# pack bugzilla fields into dictionary of trac custom field
# names and values
customfields = {}
for bugfield, customfield in CUSTOMFIELD_MAP.iteritems():
customfields[customfield] = bug[bugfield]
ticket['customfields'] = customfields
mysql_cur.execute("SELECT * FROM cc WHERE bug_id = %s", bugid)
cc_records = mysql_cur.fetchall()
cc_list = []
for cc in cc_records:
cc_list.append(trac.getLoginName(mysql_cur, cc['who']))
cc_list = [cc for cc in cc_list if cc not in IGNORE_CC]
ticket['cc'] = string.join(cc_list, ', ')
ticket['version'] = bug['version']
target_milestone = bug['target_milestone']
if target_milestone in IGNORE_MILESTONES:
target_milestone = ''
ticket['milestone'] = target_milestone
bug_status = bug['bug_status'].lower()
ticket['status'] = statusXlator[bug_status]
ticket['resolution'] = bug['resolution'].lower()
# a bit of extra work to do open tickets
if bug_status == 'open':
if owner != '':
ticket['status'] = 'assigned'
else:
ticket['status'] = 'new'
ticket['summary'] = bug['short_desc']
mysql_cur.execute("SELECT * FROM longdescs WHERE bug_id = %s" % bugid)
longdescs = list(mysql_cur.fetchall())
# check for empty 'longdescs[0]' field...
if len(longdescs) == 0:
ticket['description'] = ''
else:
ticket['description'] = longdescs[0]['thetext']
del longdescs[0]
for desc in longdescs:
ignore = False
for comment in IGNORE_COMMENTS:
if re.match(comment, desc['thetext']):
ignore = True
if ignore:
continue
trac.addTicketComment(ticket=bugid,
time = desc['bug_when'],
author=trac.getLoginName(mysql_cur, desc['who']),
value = desc['thetext'])
mysql_cur.execute("""SELECT * FROM bugs_activity WHERE bug_id = %s
ORDER BY bug_when""" % bugid)
bugs_activity = mysql_cur.fetchall()
resolution = ''
ticketChanges = []
keywords = []
for activity in bugs_activity:
field_name = trac.getFieldName(mysql_cur, activity['fieldid']).lower()
removed = activity[activityFields['removed']]
added = activity[activityFields['added']]
# statuses and resolutions are in lowercase in trac
if field_name == "resolution" or field_name == "bug_status":
removed = removed.lower()
added = added.lower()
# remember most recent resolution, we need this later
if field_name == "resolution":
resolution = added.lower()
add_keywords = []
remove_keywords = []
# convert bugzilla field names...
if field_name == "bug_severity":
if SEVERITIES:
field_name = "severity"
else:
field_name = "priority"
elif field_name == "assigned_to":
field_name = "owner"
elif field_name == "bug_status":
field_name = "status"
if removed in STATUS_KEYWORDS:
remove_keywords.append(STATUS_KEYWORDS[removed])
if added in STATUS_KEYWORDS:
add_keywords.append(STATUS_KEYWORDS[added])
added = statusXlator[added]
removed = statusXlator[removed]
elif field_name == "short_desc":
field_name = "summary"
elif field_name == "product" and COMPONENTS_FROM_PRODUCTS:
field_name = "component"
elif ((field_name == "product" and not COMPONENTS_FROM_PRODUCTS) or
(field_name == "component" and COMPONENTS_FROM_PRODUCTS)):
if MAP_ALL_KEYWORDS or removed in KEYWORDS_MAPPING:
kw = KEYWORDS_MAPPING.get(removed, removed)
if kw:
remove_keywords.append(kw)
if MAP_ALL_KEYWORDS or added in KEYWORDS_MAPPING:
kw = KEYWORDS_MAPPING.get(added, added)
if kw:
add_keywords.append(kw)
if field_name == "component":
# just keep the keyword change
added = removed = ""
elif field_name == "target_milestone":
field_name = "milestone"
if added in IGNORE_MILESTONES:
added = ""
if removed in IGNORE_MILESTONES:
removed = ""
ticketChange = {}
ticketChange['ticket'] = bugid
ticketChange['time'] = activity['bug_when']
ticketChange['author'] = trac.getLoginName(mysql_cur,
activity['who'])
ticketChange['field'] = field_name
ticketChange['oldvalue'] = removed
ticketChange['newvalue'] = added
if add_keywords or remove_keywords:
# ensure removed ones are in old
old_keywords = keywords + [kw for kw in remove_keywords if kw
not in keywords]
# remove from new
keywords = [kw for kw in keywords if kw not in remove_keywords]
# add to new
keywords += [kw for kw in add_keywords if kw not in keywords]
if old_keywords != keywords:
ticketChangeKw = ticketChange.copy()
ticketChangeKw['field'] = "keywords"
ticketChangeKw['oldvalue'] = ' '.join(old_keywords)
ticketChangeKw['newvalue'] = ' '.join(keywords)
ticketChanges.append(ticketChangeKw)
if field_name in IGNORED_ACTIVITY_FIELDS:
continue
# Skip changes that have no effect (think translation!).
if added == removed:
continue
# Bugzilla splits large summary changes into two records.
for oldChange in ticketChanges:
if (field_name == "summary"
and oldChange['field'] == ticketChange['field']
and oldChange['time'] == ticketChange['time']
and oldChange['author'] == ticketChange['author']):
oldChange['oldvalue'] += " " + ticketChange['oldvalue']
oldChange['newvalue'] += " " + ticketChange['newvalue']
break
# cc and attachments.isobsolete sometime appear
# in different activities with same time
if ((field_name == "cc" or field_name == "attachments.isobsolete") \
and oldChange['time'] == ticketChange['time']):
oldChange['newvalue'] += ", " + ticketChange['newvalue']
break
else:
ticketChanges.append (ticketChange)
for ticketChange in ticketChanges:
trac.addTicketChange (**ticketChange)
# For some reason, bugzilla v2.11 seems to clear the resolution
# when you mark a bug as closed. Let's remember it and restore
# it if the ticket is closed but there's no resolution.
if not ticket['resolution'] and ticket['status'] == "closed":
ticket['resolution'] = resolution
bug_status = bug['bug_status']
if bug_status in STATUS_KEYWORDS:
kw = STATUS_KEYWORDS[bug_status]
if kw not in keywords:
keywords.append(kw)
product = bug['product']
if product in KEYWORDS_MAPPING and not COMPONENTS_FROM_PRODUCTS:
kw = KEYWORDS_MAPPING.get(product, product)
if kw and kw not in keywords:
keywords.append(kw)
component = bug['component']
if (COMPONENTS_FROM_PRODUCTS and \
(MAP_ALL_KEYWORDS or component in KEYWORDS_MAPPING)):
kw = KEYWORDS_MAPPING.get(component, component)
if kw and kw not in keywords:
keywords.append(kw)
ticket['keywords'] = string.join(keywords)
ticketid = trac.addTicket(**ticket)
if BZ_VERSION >= 2210:
mysql_cur.execute("SELECT attachments.*, attach_data.thedata "
"FROM attachments, attach_data "
"WHERE attachments.bug_id = %s AND "
"attachments.attach_id = attach_data.id" % bugid)
else:
mysql_cur.execute("SELECT * FROM attachments WHERE bug_id = %s" %
bugid)
attachments = mysql_cur.fetchall()
for a in attachments:
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/htpasswd.py | trac/contrib/htpasswd.py | #!/usr/bin/python
"""Replacement for htpasswd"""
# Original author: Eli Carter
import os
import sys
import random
from optparse import OptionParser
# We need a crypt module, but Windows doesn't have one by default. Try to find
# one, and tell the user if we can't.
try:
import crypt
except ImportError:
try:
import fcrypt as crypt
except ImportError:
sys.stderr.write("Cannot find a crypt module. "
"Possibly http://carey.geek.nz/code/python-fcrypt/\n")
sys.exit(1)
def salt():
    """Return a string of 2 random characters from the crypt salt alphabet."""
    alphabet = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789/.')
    return ''.join(random.choice(alphabet) for _ in range(2))
class HtpasswdFile:
    """A class for manipulating htpasswd files.

    Entries are held in memory as [username, pwhash] pairs; `load` and
    `save` read/write the whole file at once.
    """

    def __init__(self, filename, create=False):
        """Load `filename` unless `create` is set.

        Raises Exception when `create` is false and the file is missing.
        """
        self.entries = []
        self.filename = filename
        if not create:
            if os.path.exists(self.filename):
                self.load()
            else:
                raise Exception("%s does not exist" % self.filename)

    def load(self):
        """Read the htpasswd file into memory."""
        self.entries = []
        # Context manager closes the handle deterministically (the
        # original left it to garbage collection).
        with open(self.filename, 'r') as f:
            for line in f:
                # Split on the first colon only, in case a hash scheme
                # ever contains ':' itself.
                username, pwhash = line.split(':', 1)
                self.entries.append([username, pwhash.rstrip()])

    def save(self):
        """Write the htpasswd file to disk"""
        with open(self.filename, 'w') as f:
            f.writelines("%s:%s\n" % (entry[0], entry[1])
                         for entry in self.entries)

    def update(self, username, password):
        """Replace the entry for the given user, or add it if new."""
        pwhash = crypt.crypt(password, salt())
        matching_entries = [entry for entry in self.entries
                            if entry[0] == username]
        if matching_entries:
            matching_entries[0][1] = pwhash
        else:
            self.entries.append([username, pwhash])

    def delete(self, username):
        """Remove the entry for the given user."""
        self.entries = [entry for entry in self.entries
                        if entry[0] != username]
def main():
    """%prog [-c] -b filename username password
    Create or update an htpasswd file"""
    # For now, we only care about the use cases that affect tests/functional.py
    parser = OptionParser(usage=main.__doc__)
    parser.add_option('-b', action='store_true', dest='batch', default=False,
        help='Batch mode; password is passed on the command line IN THE CLEAR.'
        )
    parser.add_option('-c', action='store_true', dest='create', default=False,
        help='Create a new htpasswd file, overwriting any existing file.')
    parser.add_option('-D', action='store_true', dest='delete_user',
        default=False, help='Remove the given user from the password file.')
    opts, arguments = parser.parse_args()

    def syntax_error(msg):
        """Print a fatal usage error and exit with status 1."""
        sys.stderr.write("Syntax error: " + msg)
        sys.stderr.write(parser.get_usage())
        sys.exit(1)

    if not opts.batch:
        syntax_error("Only batch mode is supported\n")

    # Positional arguments: always file + user; a password only when not
    # deleting.
    if len(arguments) < 2:
        syntax_error("Insufficient number of arguments.\n")
    expected = 2 if opts.delete_user else 3
    if len(arguments) != expected:
        syntax_error("Incorrect number of arguments.\n")
    filename, username = arguments[0], arguments[1]
    password = None if opts.delete_user else arguments[2]

    passwdfile = HtpasswdFile(filename, create=opts.create)
    if opts.delete_user:
        passwdfile.delete(username)
    else:
        passwdfile.update(username, password)
    passwdfile.save()


if __name__ == '__main__':
    main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/l10n_diff_index.py | trac/contrib/l10n_diff_index.py | # Copyright (C) 2013 Edgewall Software
# This file is distributed under the same license as the Trac project.
"""
L10N tool which prepares an index of "interesting" changes found in a
.diff.
Skipped changes are:
- the changes for which the msgid has changed
- removal only of msgstr content
Example workflow 1), review changes to the 'fr' translation before
committing:
make diff-fr | less
Example workflow 2), force a pull of all changes from Transifex::
make update updateopts=-N
tx pull -f
make update updateopts=-N
svn diff > tx.diff
python l10n_diff_index.py tx.diff
svn revert -R .
And then use either::
emacs tx.diff.index --eval '(grep-mode)'
or::
vim -c :cbuffer -c :copen tx.diff.index
This makes it easier to go through the potentially interesting changes
only, and apply the corresponding chunks if needed.
"""
from bisect import bisect_left
import re
interesting_changes_re = re.compile(r'''
    \n
    \s (?: msgid(?:_plural)?\s)? ".*" \n          # ' ' msgid or "...",
    (?:
      [-\s] ".*" \n                               # ' ' or - "...",
     |
      - msgstr(?:\[\d+\])? \s ".*" \n             # or the -msgstr
    )*
    (?:
      ( \+ msgstr(?:\[\d+\])? \s "[^"].*" ) \n    # \1 is a non-empty +msgstr
     |
      [+\s] msgstr(?:\[\d+\])? \s ".*" \n         # or after the msgstr,
      (?: [-\s] ".*" \n                           # optional ' ' or -"...",
      )*
      ( \+ "[^"].*" )                             # \2 is a non-empty +"..."
    )
    ''', re.MULTILINE | re.VERBOSE)


def index_diffs(path, diffs):
    """Return [(lineno, added_line)] for each interesting change in `diffs`.

    `lineno` is the 1-based line number of the added (+) line within the
    diff text.  Translation-header churn (Project-Id-Version /
    PO-Revision-Date) is ignored.  `path` is unused here; it is part of
    the signature used by write_index_for().

    Bug fix: the original collected newline offsets via
    ``re.sub(r'\\n', lambda m: linenums.append(m.start()), diffs)`` --
    a replacement callable must return a string, so that raises
    TypeError.  re.finditer() does the same job correctly.
    """
    # Offsets of every newline; binary search turns a character offset
    # into a line number.
    linenums = [m.start() for m in re.finditer(r'\n', diffs)]
    index = []
    for m in interesting_changes_re.finditer(diffs):
        line = m.group(m.lastindex)
        # Transifex rewrites these headers on every pull: not interesting.
        if line.startswith(('+"Project-Id-Version:', '+"PO-Revision-Date:')):
            continue
        pos = m.start(m.lastindex)
        index.append((bisect_left(linenums, pos) + 1, line))
    return index
def write_index_for(path):
    """Write a grep-style index of the interesting changes in the diff
    file `path` to `path + '.index'`, or report that there are none.
    """
    # NOTE: `file()` and the print-chevron syntax make this Python 2 only.
    diffs = unicode(file(path, 'rb').read(), 'utf-8')
    changes = index_diffs(path, diffs)
    if changes:
        index = path + '.index'
        with file(index, 'wb') as idx:
            for n, line in changes:
                # "path:lineno: text" so grep-mode / quickfix can jump to it.
                print>>idx, (u"%s:%s: %s" % (path, n, line)).encode('utf-8')
        print "%s: %d changes indexed in %s" % (path, len(changes), index)
    else:
        print "%s: no interesting changes" % (path,)


if __name__ == '__main__':
    import sys
    # Index every diff file given on the command line.
    for path in sys.argv[1:]:
        write_index_for(path)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/migrateticketmodel.py | trac/contrib/migrateticketmodel.py | #!/usr/bin/env python
#
# This script completely migrates a <= 0.8.x Trac environment to use the new
# default ticket model introduced in Trac 0.9.
#
# In particular, this means that the severity field is removed (or rather
# disabled by removing all possible values), and the priority values are
# changed to the more meaningful new defaults.
#
# Make sure to make a backup of the Trac environment before running this!
from __future__ import with_statement
import os
import sys
from trac.env import open_environment
from trac.ticket.model import Priority, Severity
# Old (<= 0.8.x) priority name -> new Trac 0.9 default priority name.
priority_mapping = {
    'highest': 'blocker',
    'high': 'critical',
    'normal': 'major',
    'low': 'minor',
    'lowest': 'trivial'
}
def main():
    """Rename the old default priorities and remove all severities in the
    Trac environment given as the single command-line argument.
    """
    if len(sys.argv) < 2:
        print >> sys.stderr, 'usage: %s /path/to/projenv' \
            % os.path.basename(sys.argv[0])
        sys.exit(2)

    env = open_environment(sys.argv[1])
    # Group all model updates in a single database transaction.
    with env.db_transaction:
        for oldprio, newprio in priority_mapping.items():
            priority = Priority(env, oldprio)
            priority.name = newprio
            priority.update()
        # Disable the severity field by deleting every possible value.
        for severity in list(Severity.select(env)):
            severity.delete()


if __name__ == '__main__':
    main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/htdigest.py | trac/contrib/htdigest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# Copyright (C) 2006 Matthew Good <matt@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Matthew Good <matt@matt-good.net>
import errno
import fileinput
from getpass import getpass
from hashlib import md5
from optparse import OptionParser
import sys
def ask_pass():
    """Prompt twice for a new password and return it.

    Exits with status 1 when the two entries do not match.
    """
    pass1 = getpass('New password: ')
    pass2 = getpass('Re-type new password: ')
    if pass1 != pass2:
        print >>sys.stderr, "They don't match, sorry"
        sys.exit(1)
    return pass1
def get_digest(userprefix, password=None):
    """Return the htdigest line for `userprefix`, prompting interactively
    for the password when none was supplied on the command line.
    """
    if password is None:  # was `== None`; identity test is the correct idiom
        password = ask_pass()
    return make_digest(userprefix, password)
def make_digest(userprefix, password):
    """Return the htdigest file line for `userprefix` plus `password`.

    `userprefix` is "username:realm:"; the result appends the hex MD5 of
    "username:realm:password" (Python 2 byte strings).
    """
    return userprefix + md5(userprefix + password).hexdigest()
# Command-line interface: a minimal stand-in for Apache's htdigest tool.
usage = "%prog [-c] [-b] passwordfile realm username"
parser = OptionParser(usage=usage)
parser.add_option('-c', action='store_true', dest='create', default=False,
                  help='Create a new file')
parser.add_option('-b', action='store_true', dest='batch', default=False,
                  help='Batch mode, password on the commandline.')
opts, args = parser.parse_args()

try:
    if opts.batch:
        filename, realm, username, password = args
    else:
        filename, realm, username = args
        password = None
except ValueError:
    parser.error('Wrong number of arguments')

# Each htdigest line has the form "username:realm:md5hex".
prefix = '%s:%s:' % (username, realm)

if opts.create:
    # -c: (re)create the file containing just this one entry.
    try:
        f = open(filename, 'w')
    except EnvironmentError, e:
        if e.errno == errno.EACCES:
            print >>sys.stderr, 'Unable to update file', filename
            sys.exit(1)
        else:
            raise
    try:
        print >>f, get_digest(prefix, password)
    finally:
        f.close()
else:
    try:
        matched = False
        # Rewrite the file in place: replace the first matching line and
        # drop any further duplicates for the same user/realm.
        for line in fileinput.input(filename, inplace=True):
            if line.startswith(prefix):
                if not matched:
                    print get_digest(prefix, password)
                matched = True
            else:
                print line,
        if not matched:
            # User not present yet: append a new entry.
            f = open(filename, 'a')
            try:
                print >>f, get_digest(prefix, password)
            finally:
                f.close()
    except EnvironmentError, e:
        if e.errno == errno.ENOENT:
            print >>sys.stderr, 'Could not open passwd file %s for reading.' \
                % filename
            print >>sys.stderr, 'Use -c option to create a new one.'
            sys.exit(1)
        elif e.errno == errno.EACCES:
            print >>sys.stderr, 'Unable to update file', filename
            sys.exit(1)
        else:
            raise
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/l10n_reset_en_GB.py | trac/contrib/l10n_reset_en_GB.py | # Copyright (C) 2013 Edgewall Software
# This file is distributed under the same license as the Trac project.
"""
L10N tool which copies all msgid to the msgstr.
This can be useful to verify the actual changes in the en_UK message
catalogs.
"""
import re
msgid_msgstr_re = re.compile(r'''
( # \1 "en_US"
\r?\n
msgid \s ".*" \r?\n
(?: (?: msgid_plural \s )? ".*" \r?\n
)*
)
( # \2 "en_GB"
msgstr.* \s ".*" \r?\n
(?: (?: msgstr.* \s )? ".*" \r?\n
)* \r?\n
)
''', re.MULTILINE | re.VERBOSE)
def reset_file(path):
    """Reset every msgstr in the catalog at `path` to its msgid, in place."""
    # NOTE: the `file()` builtin makes this Python 2 only.
    with file(path, 'rb+') as f:
        # Preserve whichever line-ending style the catalog already uses.
        eol = '\r\n'
        content = f.read()
        if eol not in content:
            eol = '\n'

        def reset_msgstr(m):
            # Group 1 is the msgid block, group 2 the msgstr block; the
            # replacement rebuilds the msgstr block from the msgid text.
            msgid, msgstr = m.groups()
            if '\nmsgid_plural' in msgid:
                # Plural entry: msgid -> msgstr[0], msgid_plural -> msgstr[1].
                msgstr = (msgid
                          .replace(eol + 'msgid_plural', eol + 'msgstr[1]')
                          .replace(eol + 'msgid', 'msgstr[0]'))
            else:
                msgstr = msgid.replace(eol + 'msgid', 'msgstr')
            return msgid + msgstr + eol

        sanitized, nsub = msgid_msgstr_re.subn(reset_msgstr, content)
        if nsub:
            print("reset %d messages to en_US in %s" % (nsub, path))
            # Rewrite in place and truncate any trailing leftovers.
            f.seek(0)
            f.write(sanitized)
            f.truncate()
        else:
            print("no messages found in %s" % (path,))


if __name__ == '__main__':
    import sys
    for path in sys.argv[1:]:
        reset_file(path)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/workflow/migrate_original_to_basic.py | trac/contrib/workflow/migrate_original_to_basic.py | #!/usr/bin/python
import sys
import trac.env
from trac.ticket.default_workflow import load_workflow_config_snippet
def main():
    """Rewrite the ticket-workflow section of the config; and change all
    'assigned' tickets to 'accepted'.
    """
    if len(sys.argv) != 2:
        print "Usage: %s path_to_trac_environment" % sys.argv[0]
        sys.exit(1)
    tracdir = sys.argv[1]
    trac_env = trac.env.open_environment(tracdir)
    # Update the config...
    # Remove every option of the old [ticket-workflow] section first so no
    # stale actions survive, then load the shipped basic workflow snippet.
    old_workflow = trac_env.config.options('ticket-workflow')
    for name, value in old_workflow:
        trac_env.config.remove('ticket-workflow', name)
    load_workflow_config_snippet(trac_env.config, 'basic-workflow.ini')
    trac_env.config.save()
    # Update the ticket statuses...
    # The basic workflow has no 'assigned' state; map existing tickets to
    # the equivalent 'accepted' state.
    trac_env.db_transaction("""
        UPDATE ticket SET status = 'accepted' WHERE status = 'assigned'
        """)

if __name__ == '__main__':
    main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/contrib/workflow/workflow_parser.py | trac/contrib/workflow/workflow_parser.py | #!/usr/bin/env python
import sys
import getopt
import locale
import pkg_resources
pkg_resources.require('Trac')
from trac.config import Configuration
from trac.ticket.default_workflow import parse_workflow_config
_debug = False
def debug(s):
    """Write *s* to stderr, but only when the module-level _debug flag is set."""
    if not _debug:
        return
    sys.stderr.write(s)
def readconfig(filename):
    """Returns a list of raw config options"""
    config = Configuration(filename)
    # Materialize the options iterator so the emptiness check below works
    # and callers can iterate the result more than once.
    rawactions = list(config.options('ticket-workflow'))
    debug("%s\n" % str(rawactions))
    if not rawactions:
        # No [ticket-workflow] section at all: nothing to graph, bail out.
        sys.stderr.write("ERROR: You don't seem to have a [ticket-workflow] "
                         "section.\n")
        sys.exit(1)
    return rawactions
class ColorScheme(object):
    """Assign a stable color to each name, balancing use across the palette."""

    # cyan, yellow are too light in color
    colors = ['black', 'blue', 'red', 'green', 'purple', 'orange', 'darkgreen']

    def __init__(self):
        # name -> index into self.colors
        self.mapping = {}
        # how many distinct names were assigned each color
        self.coloruse = [0] * len(self.colors)

    def get_color(self, name):
        """Return the color for *name*, assigning a fresh one on first sight."""
        if name not in self.mapping:
            index = self.pick_color(name)
            self.mapping[name] = index
            self.coloruse[index] += 1
        return self.colors[self.mapping[name]]

    def pick_color(self, name):
        """Pick a color that has not been used much so far."""
        return self.coloruse.index(min(self.coloruse))
def actions2graphviz(actions, show_ops=False, show_perms=False):
    """Returns a list of lines to be fed to graphviz."""
    # The size value makes it easier to create a useful printout.
    color_scheme = ColorScheme()
    # Graph preamble: pin 'new' at the top (rank=source) and 'closed' at the
    # bottom (rank=sink) so the workflow reads top-to-bottom.
    digraph_lines = ["""
digraph G {
  center=1
  size="10,8"
  { rank=source; new [ shape=invtrapezium ] }
  { rank=sink; closed [ shape=trapezium ] }
"""]
    for action, attributes in actions.items():
        # The edge label always carries the action name; operations,
        # permissions and an explicit resolution are appended on request.
        label = [attributes['name'], ]
        if show_ops:
            label += attributes['operations']
        if show_perms:
            label += attributes['permissions']
        if 'set_resolution' in attributes:
            label += ['(' + attributes['set_resolution'] + ')']
        # One edge per (oldstate -> newstate) transition; all edges of the
        # same action share one color from the scheme.
        for oldstate in attributes['oldstates']:
            color = color_scheme.get_color(attributes['name'])
            # '\\n' is a literal backslash-n, i.e. a graphviz line break
            # inside the edge label, not a Python newline.
            digraph_lines.append(
                ' "%s" -> "%s" [label="%s" color=%s fontcolor=%s]' % \
                (oldstate, attributes['newstate'], '\\n'.join(label), color,
                 color))
    digraph_lines.append('}')
    return digraph_lines
def main(filename, output, show_ops=False, show_perms=False):
    """Read the workflow from *filename* and write a DOT graph to *output*.

    :param show_ops: include each action's operations in the edge labels
    :param show_perms: include each action's permissions in the edge labels
    """
    # Read in the config
    rawactions = readconfig(filename)
    # Parse the config information
    actions = parse_workflow_config(rawactions)
    # Convert to graphviz
    digraph_lines = actions2graphviz(actions, show_ops, show_perms)
    # And output
    # Python 2 unbound-method call: encode the joined unicode text with the
    # locale's preferred encoding before writing it out.
    output.write(unicode.encode('\n'.join(digraph_lines), locale.getpreferredencoding()))
def usage(output):
    """Write the command-line help text for workflow_parser to *output*."""
    help_lines = [
        'workflow_parser [options] configfile.ini [output.dot]',
        '-h --help shows this message',
        '-o --operations include operations in the graph',
        '-p --permissions include permissions in the graph',
    ]
    output.write('\n'.join(help_lines) + '\n')
if __name__ == '__main__':
    show_ops = False
    show_perms = False
    # Parse the command line; an unknown option prints usage and aborts.
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hop', ['help', 'operations',
                                                         'permissions'])
    except getopt.GetoptError:
        usage(sys.stderr)
        sys.exit(1)
    for option, argument in opts:
        if option in ('-h', '--help'):
            usage(sys.stdout)
            sys.exit(0)
        elif option in ('-o', '--operations'):
            show_ops = True
        elif option in ('-p', '--permissions'):
            show_perms = True
    if not args:
        # The workflow .ini file is mandatory.
        sys.stderr.write('Syntax error: config filename required.\n')
        usage(sys.stderr)
        sys.stderr.flush()
        sys.exit(1)
    ini_filename = args[0]
    # Optional second positional argument names the output .dot file;
    # without it the graph is written to stdout.
    if len(args) > 1:
        output = open(args[1], 'w')
    else:
        output = sys.stdout
    main(ini_filename, output, show_ops, show_perms)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/doc/conf.py | trac/doc/conf.py | # -*- coding: utf-8 -*-
#
# Trac documentation build configuration file, created by
# sphinx-quickstart on Wed May 14 09:05:13 2008.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the
# namespace that aren't pickleable (module imports are okay, they're
# removed automatically).
#
# All configuration values have a default value; values that are
# commented out serve to show the default value.
import sys, os
# General substitutions.
project = 'Trac'
copyright = '2012, Edgewall Software'
url = 'http://trac.edgewall.org'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# Devel or Release mode for the documentation (if devel, include TODOs,
# can also be used in conditionals: .. ifconfig :: devel)
devel = True
if devel:
release += 'dev'
# If your extensions are in another directory, add it here. If the
# directory is relative to the documentation root, use os.path.abspath
# to make it absolute, like shown here.
# sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = []
# -- Autodoc
extensions.append('sphinx.ext.autodoc')
autoclass_content = 'both'
autodoc_member_order = 'bysource'
# -- Conditional content (see setup() below)
extensions.append('sphinx.ext.ifconfig')
# -- Link to other Sphinx documentations
extensions.append('sphinx.ext.intersphinx')
intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
# -- Keep track of :todo: items
extensions.append('sphinx.ext.todo')
todo_include_todos = devel
# -- PDF support via http://code.google.com/p/rst2pdf/
try:
import rst2pdf
extensions.append('rst2pdf.pdfbuilder')
except ImportError:
pass
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['utils/templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_patterns = [
]
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'trac'
# The default role is a reference to some Python object
default_role = 'py:obj'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'tracsphinx.css'
html_theme = 'sphinxdoc'
html_theme_options = {
# 'linkcolor': '#B00',
# 'visitedlinkcolor': '#B00',
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = 'images/trac_logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['utils/']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Tracdoc'
modindex_common_prefix = ['trac.', 'tracopt.']
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Trac.tex', 'Trac API Documentation', 'The Trac Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Options for PDF output
# ----------------------
# (initially copied from
# http://rst2pdf.googlecode.com/svn/tags/0.16/doc/manual.txt)
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# If there is more than one author, separate them with \\.
# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
#
# The options element is a dictionary that lets you override
# this config per-document.
# For example,
# ('index', u'MyProject', u'My Project', u'Author Name',
# dict(pdf_compressed = True))
# would mean that specific document would be compressed
# regardless of the global pdf_compressed setting.
pdf_documents = [
('index', 'trac_dev', project, u'The Trac Team'),
]
# A comma-separated list of custom stylesheets (latest has higher precedence)
pdf_stylesheets = [
'sphinx',
'a4',
'trac',
os.path.join(os.path.dirname(__file__), 'utils', 'trac_dev_pdf.style')
]
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
pdf_compressed = True
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
pdf_language = "en_US"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
pdf_break_level = 1
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
#pdf_breakside = 'any'
# Insert footnotes where they are defined instead of
# at the end.
#pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
#pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = True
# If false, no modindex is generated.
pdf_use_modindex = True
# If false, no coverpage is generated.
#pdf_use_coverpage = True
# Name of the cover page template to use
#pdf_cover_template = 'sphinxcover.tmpl'
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
# Set the default DPI for images
#pdf_default_dpi = 72
# Enable rst2pdf extension modules (default is only vectorpdf)
# you need vectorpdf if you want to use sphinx's graphviz support
#pdf_extensions = ['vectorpdf']
# Page template name for "regular" pages
#pdf_page_template = 'cutePage'
# Show Table Of Contents at the beginning?
#pdf_use_toc = True
# How many levels deep should the table of contents be?
pdf_toc_depth = 9999
# Add section number to section references
pdf_use_numbered_links = False
# Background images fitting mode
pdf_fit_background_mode = 'scale'
def setup(app):
    """Sphinx extension hook: register the custom t.e.o link roles and the
    ``devel`` config value used by ``.. ifconfig::`` blocks."""
    # adding role for linking to InterTrac targets on t.e.o
    from docutils import nodes
    from docutils.parsers.rst import roles

    def teo_role(name, rawtext, text, lineno, inliner, options=None,
                 content=None):
        # None defaults instead of mutable {} / []: roles.set_classes
        # mutates the options dict, so a shared default would leak state
        # between role invocations.
        options = {} if options is None else options
        # special case ticket references
        if text[0] == '#':
            ref = url + '/ticket/' + text[1:]
        else:
            ref = url + '/intertrac/' + text
        roles.set_classes(options)
        node = nodes.reference(rawtext, text, refuri=ref, **options)
        return [node], []
    roles.register_canonical_role('teo', teo_role)

    def extensionpoints_role(name, rawtext, text, lineno, inliner,
                             options=None, content=None):
        # Same fix as teo_role: avoid shared mutable default arguments.
        options = {} if options is None else options
        ref = url + '/wiki/TracDev/PluginDevelopment/ExtensionPoints/' + text
        roles.set_classes(options)
        node = nodes.reference(rawtext, text + " extension point",
                               refuri=ref, **options)
        return [node], []
    roles.register_canonical_role('extensionpoints', extensionpoints_role)

    # ifconfig variables
    app.add_config_value('devel', '', True)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/doc/utils/checkapidoc.py | trac/doc/utils/checkapidoc.py | # -*- coding: utf-8 -*-
"""Trac API doc checker
Verify that all symbols belonging to modules already documented in the doc/api
Sphinx sources are referenced.
See http://trac.edgewall.org/wiki/TracDev/ApiDocs
"""
import fnmatch
import os
import re
import sys
excluded_docs = ['index.rst']
api_doc = 'doc/api'
def usage(cmd):
    """Print command-line help for *cmd* and exit with status 0."""
    print "Usage: %s [FILE...]" % (cmd,)
    print
    print "FILE is a %s file and can be a glob pattern." % (api_doc,)
    print "If no files are given, check all."
    exit(0)
def main(argv):
    """Check the requested doc/api files (all of them by default).

    Recognized flags: -h/--help, -v/--verbose, -a/--all (also report
    symbols without docstrings). Remaining arguments are doc/api file
    names or glob patterns.
    """
    api_files = rst_files = [rst for rst in os.listdir('doc/api')
                             if fnmatch.fnmatch(rst, '*.rst')
                             and rst not in excluded_docs]
    cmd = argv.pop(0)
    def has(*options):
        # Destructively pop the first matching flag out of argv; the popped
        # option string (truthy) doubles as the "flag was present" result.
        for opt in options:
            if opt in argv:
                return argv.pop(argv.index(opt))
    if has('-h', '--help'):
        usage(cmd)
    verbose = has('-v', '--verbose')
    only_documented = not has('-a', '--all')
    if argv:
        # Normalize the remaining arguments to bare *.rst names relative to
        # doc/api, then expand glob patterns against the known files.
        given_files = []
        for arg in argv:
            arg = arg.replace('\\', '/').replace(api_doc + '/', '')
            arg = arg.replace('.rst', '') + '.rst'
            if '*' in arg: # glob pattern
                given_files += [rst for rst in api_files
                                if fnmatch.fnmatch(rst, arg)]
            elif arg in api_files:
                given_files.append(arg)
        api_files = given_files
    rst_basenames = sorted(f[:-4] for f in rst_files)
    for rst in api_files:
        basename = rst.replace('.rst', '')
        if verbose or len(api_files) > 1:
            print "== Checking %s ... " % (rst,)
        # has_submodules: some other doc file name starts with this one,
        # i.e. a sub-module of this package is documented separately.
        check_api_doc(basename, verbose, only_documented,
                      any(f.startswith(basename) and f != basename
                          for f in rst_basenames))
def check_api_doc(basename, verbose, only_documented, has_submodules):
    """Report symbols of one module that are missing from its Sphinx doc.

    *basename* is the doc file base name; underscores map to dots to get
    the importable module name (e.g. ``trac_db`` -> ``trac.db``).
    """
    module_name = basename.replace('_', '.')
    try:
        module = __import__(module_name, globals(), {}, ['__all__'])
    except ImportError, e:
        print "Skipping %s (%s)" % (basename, e)
        return
    all = getattr(module, '__all__', None)
    if not all:
        # Fall back on heuristically collected exported symbols.
        print "Warning: %s doesn't define __all__, using exported symbols." % (
            module_name,)
        all = get_default_symbols(module, only_documented, has_submodules)
    # __no_apidoc__ lists symbols deliberately excluded from the API docs;
    # it may be a whitespace-separated string or an iterable of names.
    no_apidoc = getattr(module, '__no_apidoc__', None)
    if no_apidoc:
        if isinstance(no_apidoc, basestring):
            no_apidoc = [s.strip() for s in no_apidoc.split()]
        all = list(set(all) - set(no_apidoc))
    symbols, keywords = get_sphinx_documented_symbols(basename + '.rst')
    for symbol in sorted(all):
        if symbol in symbols:
            if verbose:
                print " - OK %14s :: %s" % (
                    keywords[symbols.index(symbol)], symbol)
        else:
            # Undocumented symbol: suggest the Sphinx auto* directive to
            # add, picked from the runtime type of the value.
            value = getattr(module, symbol)
            cls = getattr(value, '__class__', None)
            keyword = 'data'
            if cls.__name__ in ('function', 'instancemethod'):
                keyword = 'function'
            elif cls.__name__ == 'module':
                keyword = 'module'
            else:
                keyword = 'class'
            print " * .. %14s :: %s" % ('auto' + keyword, symbol)
# Matches Sphinx directives documenting a symbol: group 1 is the directive
# keyword (py:module, autoclass, data, ...), group 2 the dotted symbol name.
sphinx_doc_re = re.compile(r'''
    ^.. \s+ ((?:py:|auto)(?:module|class|function|attribute)|data) # keyword
    \s* :: \s* ([\w\.]+) # symbol
    ''', re.MULTILINE | re.VERBOSE)

def get_sphinx_documented_symbols(rst):
    """Return ``(symbols, keywords)`` found in the doc/api file *rst*.

    ``symbols`` holds the last component of each documented dotted name,
    ``keywords`` the matching Sphinx directive keyword, in file order.
    """
    # with open(): the original used the Python2-only file() builtin and
    # never closed the handle.
    with open(os.path.join(api_doc, rst)) as f:
        doc = f.read()
    symbols, keywords = [], []
    for k, s in sphinx_doc_re.findall(doc):
        symbols.append(s.split('.')[-1])
        keywords.append(k)
    return symbols, keywords
def get_default_symbols(module, only_documented, has_submodules):
    """Heuristically collect the symbols to document for a module that has
    no ``__all__``: public names minus imported ones minus sub-modules,
    optionally restricted to names carrying their own docstring."""
    def is_module(name):
        # A name importable on its own is a module, not a plain symbol.
        try:
            __import__(name)
        except ImportError:
            return False
        return True

    def has_own_doc(name):
        # Keep a symbol only when its docstring is not merely inherited
        # from its class.
        value = getattr(module, name)
        return bool(value.__doc__) and (
            not getattr(value, '__class__', None)
            or value.__doc__ != value.__class__.__doc__)

    public = get_public_symbols(module) - get_imported_symbols(module,
                                                               has_submodules)
    symbols = [name for name in public if not is_module(name)]
    if not only_documented:
        return symbols
    return [name for name in symbols if has_own_doc(name)]
def get_public_symbols(m):
    """Return the set of names in *m* that do not start with an underscore."""
    public = set()
    for name in dir(m):
        if not name.startswith('_'):
            public.add(name)
    return public
import_from_re = re.compile(r'''
^ \s* from \s+ ([\w\.]+) \s+ import \s+ # module
( \* # all symbols
| %s (?: [\s\\]* , [\s\\]* %s)* # list of symbols
| \( \s* %s (?: \s* , \s* %s)* \s* \) # list of symbols in parenthesis
)
''' % ((r'(?:\w+|\w+\s+as\s+\w+)',) * 4), re.MULTILINE | re.VERBOSE)
remove_original_re = re.compile(r'\w+\s+as', re.MULTILINE)
def get_imported_symbols(module, has_submodules):
src_filename = module.__file__.replace('\\', '/').replace('.pyc', '.py')
if src_filename.endswith('/__init__.py') and not has_submodules:
return set()
src = file(src_filename).read()
imported = set()
for mod, symbol_list in import_from_re.findall(src):
symbol_list = symbol_list.strip()
if symbol_list == '*':
try:
imported_module = __import__(mod, globals(), {}, ['__all__'])
symbols = set(getattr(imported_module, '__all__', None) or
get_public_symbols(imported_module))
except ImportError:
print "Warning: 'from %s import *' couldn't be resolved" % (
mod,)
continue
else:
if symbol_list and symbol_list[0] == '(' and symbol_list[-1] == ')':
symbol_list = symbol_list[1:-1]
symbols = set(remove_original_re.sub('', symbol_list)
.replace('\\', '').replace(',', ' ').split())
imported |= symbols
return imported
if __name__ == '__main__':
main(sys.argv)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/trac/doc/utils/runepydoc.py | trac/doc/utils/runepydoc.py | # Simple wrapper script needed to run epydoc
import sys
try:
from epydoc.cli import cli
except ImportError:
print>>sys.stderr, "No epydoc installed (see http://epydoc.sourceforge.net)"
sys.exit(2)
# Epydoc 3.0.1 has some trouble running with recent Docutils (>= 0.6),
# so we work around this bug, following the lines of the fix in
# https://bugs.gentoo.org/attachment.cgi?id=210118
# (see http://bugs.gentoo.org/287546)
try:
from docutils.nodes import Text
if not hasattr(Text, 'data'):
setattr(Text, 'data', property(lambda self: self.astext()))
except ImportError:
print>>sys.stderr, "docutils is needed for running epydoc " \
"(see http://docutils.sourceforge.net)"
sys.exit(2)
# Epydoc doesn't allow much control over the generated graphs. This is
# bad especially for the class graph for Component which has a lot of
# subclasses, so we need to force Left-to-Right mode.
# from epydoc.docwriter.html import HTMLWriter
# HTMLWriter_render_graph = HTMLWriter.render_graph
# def render_graph_LR(self, graph):
# if graph:
# graph.body += 'rankdir=LR\n'
# return HTMLWriter_render_graph(self, graph)
# HTMLWriter.render_graph = render_graph_LR
# Well, LR mode doesn't really look better...
# the ASCII-art version seems better in most cases.
# Workaround "visiting unknown node type" error due to `.. note ::`
# This was due to the lack of Admonitions transforms. Add it.
from epydoc.markup.restructuredtext import _DocumentPseudoWriter
from docutils.transforms import writer_aux
# Monkey-patch the pseudo writer so the Admonitions transform is appended to
# its transform list: without it, `.. note::` blocks raise
# "visiting unknown node type" (see comment above).
orig_get_transforms = _DocumentPseudoWriter.get_transforms
def pseudo_get_transforms(self):
    # Delegate to the original method, then add the missing transform.
    return orig_get_transforms(self) + [writer_aux.Admonitions]
_DocumentPseudoWriter.get_transforms = pseudo_get_transforms
# Run epydoc
cli()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/setup.py | bloodhound_multiproduct/setup.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import codecs
import sys
from setuptools import setup
# Force UTF-8 for stdout/err if --utf8 option is specified.
# For some reason python doesn't obey LANG/LC_CTYPE settings
# if output is not a terminal (e.g. pipes don't work).
if __name__ == '__main__':
    # Scan for a --utf8 flag; when present, wrap stdout/stderr in UTF-8
    # writers and strip the flag so setuptools never sees it.
    for ac, av in enumerate(sys.argv):
        if av == '--utf8':
            sys.stdout = codecs.getwriter('utf8')(sys.stdout)
            sys.stderr = codecs.getwriter('utf8')(sys.stderr)
            # Mutating sys.argv during iteration is safe here because we
            # break out of the loop immediately afterwards.
            del sys.argv[ac]
            break
extra = {}
try:
from trac.util.dist import get_l10n_cmdclass
except ImportError:
pass
else:
cmdclass = get_l10n_cmdclass()
if cmdclass:
extra['cmdclass'] = cmdclass
extractors = [
('**.py', 'trac.dist:extract_python', None),
('**/templates/**.html', 'genshi', None),
('**/templates/**.txt', 'genshi', {
'template_class': 'genshi.template:TextTemplate'
}),
]
extra['message_extractors'] = {
'multiproduct': extractors,
}
setup(
name='BloodhoundMultiProduct',
version='0.9.0',
description="Multiproduct support for Apache(TM) Bloodhound.",
author="Apache Bloodhound",
license="Apache License v2",
url="https://bloodhound.apache.org/",
packages=['multiproduct', 'multiproduct.ticket', 'multiproduct.util',
'tests', ],
package_data={
'multiproduct': ['templates/*.html', 'locale/*/LC_MESSAGES/*.mo']},
install_requires=['sqlparse'],
entry_points={'trac.plugins': [
'multiproduct.model = multiproduct.model',
'multiproduct.perm = multiproduct.perm',
'multiproduct.product_admin = multiproduct.product_admin',
'multiproduct.ticket.batch = multiproduct.ticket.batch',
'multiproduct.ticket.query = multiproduct.ticket.query',
'multiproduct.ticket.web_ui = multiproduct.ticket.web_ui',
'multiproduct.web_ui = multiproduct.web_ui',
], },
test_suite='tests.test_suite',
tests_require=['unittest2'] if sys.version_info < (2, 7) else [],
**extra
)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/wikisyntax.py | bloodhound_multiproduct/tests/wikisyntax.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's core wiki syntax in product environments"""
import os.path
import pkg_resources
import re
import shutil
import tempfile
import unittest
from genshi.core import escape
from trac.attachment import Attachment
from trac.web.href import Href
from trac.tests import wikisyntax
from trac.ticket.model import Ticket
from trac.ticket.tests import wikisyntax as ticket_wikisyntax
from trac.util.text import to_unicode
from multiproduct.api import PRODUCT_SYNTAX_DELIMITER
from multiproduct.env import ProductEnvironment
from multiproduct.ticket.query import ProductTicketQueryMacro
from tests.env import MultiproductTestCase
from tests.wiki import formatter
def attachment_setup(tc):
    """Create the wiki and ticket attachments the attachment link tests
    refer to, under a temporary global environment path."""
    import trac.ticket.api
    import trac.wiki.api
    tc.global_env.path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
    if tc.env is not tc.global_env:
        # NOTE(review): deleting tc.env.path presumably drops a cached value
        # so the product env re-derives it from the global path set above --
        # confirm against ProductEnvironment.
        del tc.env.path
    attachment = Attachment(tc.env, 'wiki', 'WikiStart')
    attachment.insert('file.txt', tempfile.TemporaryFile(), 0)
    attachment = Attachment(tc.env, 'ticket', 123)
    attachment.insert('file.txt', tempfile.TemporaryFile(), 0)
    attachment = Attachment(tc.env, 'wiki', 'SomePage/SubPage')
    attachment.insert('foo.txt', tempfile.TemporaryFile(), 0)
def attachment_teardown(tc):
    """Reset the database and remove the temporary environment directory
    created by attachment_setup."""
    tc.global_env.reset_db()
    shutil.rmtree(tc.global_env.path)
def ticket_setup(tc):
    """Insert the ticket referenced by the ticket link tests into the
    test case's environment."""
    ticket = Ticket(tc.env)
    ticket.values.update({'reporter': 'santa',
                          'summary': 'This is the summary',
                          'status': 'new'})
    # FIXME : UGLY ! Should not be explicit for product environments
    # In a product environment the ticket must carry the product prefix;
    # in the global environment the product field stays empty.
    ticket['product'] = (tc.env.product.prefix
                        if isinstance(tc.env, ProductEnvironment)
                        else '')
    ticket.insert()
# Full syntax
PRODUCT_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-link-tests.txt'))
PRODUCT_ATTACHMENT_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-attachment-link-tests.txt'))
PRODUCT_SEARCH_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-search-link-tests.txt'))
PRODUCT_TICKET_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-ticket-link-tests.txt'))
PRODUCT_TICKET_JIRA = to_unicode(pkg_resources.resource_string(
__name__, 'product-ticket-jira-tests.txt'))
PRODUCT_REPORT_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-report-link-tests.txt'))
PRODUCT_MILESTONE_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-milestone-link-tests.txt'))
PRODUCT_QUERY_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-query-link-tests.txt'))
PRODUCT_QUERY2_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-query2-link-tests.txt'))
PRODUCT_COMMENT_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-comment-link-tests.txt'))
# Compact syntax
PRODUCT_ATTACHMENT_SHORTLINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-attachment-link-tests.short.txt'))
PRODUCT_SEARCH_SHORTLINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-search-link-tests.short.txt'))
PRODUCT_TICKET_SHORTLINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-ticket-link-tests.short.txt'))
PRODUCT_REPORT_SHORTLINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-report-link-tests.short.txt'))
PRODUCT_MILESTONE_SHORTLINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-milestone-link-tests.short.txt'))
PRODUCT_QUERY_SHORTLINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-query-link-tests.short.txt'))
PRODUCT_COMMENT_SHORTLINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-comment-link-tests.short.txt'))
PRODUCT_NOMATCH_LINKS = to_unicode(pkg_resources.resource_string(
__name__, 'product-nomatch-link-tests.txt'))
PRODUCT_PREFIXES = MultiproductTestCase.PRODUCT_DATA.keys()
PRODUCT_PREFIXES.remove(MultiproductTestCase.default_product)
def clear_base_href_setup(tc):
    """Pin the global environment's hrefs to known values so the expected
    link URLs in the test data are stable."""
    tc.global_env.href = Href('/')
    tc.global_env.abs_href = Href('http://www.example.com/')
    if tc.env is not tc.global_env:
        # NOTE(review): deleting the product env's href attributes and
        # nulling the private caches presumably forces them to be recomputed
        # from the global values above -- confirm against ProductEnvironment.
        del tc.env.abs_href
        del tc.env.href
        tc.env._href = tc.env._abs_href = None
def load_products_setup(prefixes):
    """Build a test-case setup callable that first pins the base hrefs and
    then loads every product in *prefixes* into the global environment."""
    def _load_products_setup(tc):
        clear_base_href_setup(tc)
        for prefix in prefixes:
            tc._load_product_from_data(tc.global_env, prefix)
    return _load_products_setup
def link_mp_setup(_setup):
    """Wrap the test-case setup *_setup* so the base hrefs are pinned
    before it runs."""
    def _link_mp_setup(tc):
        # Run the href reset first, then the wrapped setup, in order.
        for step in (clear_base_href_setup, _setup):
            step(tc)
    return _link_mp_setup
# Product testing contexts explained
#
# Product testing contexts are a hack used to hijack the mechanism
# used by Trac test suite in order to run wiki test cases in product context
#
# title_prefix : prepend this text to test case summary
# link_prefix : used to put link references in given product context
# link_title_prefix : short text to highlight environment context
# path_prefix : prepended to URLs expanded using `link_prefix`
# main_product : prefix identifying the product bound to test case
# `env` attribute
# setup_product : optional prefix of the product that will be setup
# i.e. the product under test
# load_products : optional product prefixes list to load at setup time
# *_product_name : target product name (e.g. setup_product_name )
# *_escaped : escaped forms used to match output
TEST_PRODUCT_CONTEXTS = [
{'tc_title_prefix' : 'product: links',
'link_prefix' : 'product:tp1:',
'link_prefix_quote' : 'product:"tp1:',
'path_prefix' : '/products/tp1',
'main_product' : 'tp2',
'setup_product' : 'tp1',
'link_title_prefix' : '[tp1] ',
},
{'tc_title_prefix' : 'product: links unicode prefix',
'link_prefix' : u'product:xü:',
'link_prefix_quote' : u'product:"xü:',
'path_prefix' : '/products/x%C3%BC',
'main_product' : 'tp2',
'setup_product' : u'xü',
'link_title_prefix' : u'[xü] ',
},
# Ignored as TracLinks resolver won't match expression
#{'tc_title_prefix' : 'product:: refs to global',
# 'link_prefix' : 'product::',
# 'path_prefix' : '',
# 'main_product' : 'tp2',
# 'setup_product' : None,
# 'link_title_prefix' : '<global> '
# },
{'tc_title_prefix' : 'global: links',
'link_prefix' : 'global:',
'link_prefix_quote' : 'global:"',
'path_prefix' : '',
'main_product' : 'tp2',
'setup_product' : '',
'link_title_prefix' : '<global> ',
},
]
TEST_PRODUCT_CONTEXTS_COMPACT = [
{'tc_title_prefix' : 'short product syntax',
'link_prefix' : 'tp1' + PRODUCT_SYNTAX_DELIMITER,
'link_prefix_quote' : 'tp1%s"' % PRODUCT_SYNTAX_DELIMITER,
'path_prefix' : '/products/tp1',
'main_product' : 'tp2',
'setup_product' : 'tp1',
'link_title_prefix' : '[tp1] ',
},
{'tc_title_prefix' : 'short product syntax unicode prefix',
'link_prefix' : u'xü' + PRODUCT_SYNTAX_DELIMITER,
'link_prefix_quote' : u'xü%s"' % PRODUCT_SYNTAX_DELIMITER,
'path_prefix' : '/products/x%C3%BC',
'main_product' : 'tp2',
'setup_product' : u'xü',
'link_title_prefix' : u'[xü] ',
},
]
# Derive the extra fields documented above for every testing context: each
# value gains a '*_escaped' twin, and every '*_product' key additionally
# gains '*_name' (looked up in PRODUCT_DATA, '' when unknown) plus its
# escaped form.
for ctxlst in (TEST_PRODUCT_CONTEXTS, TEST_PRODUCT_CONTEXTS_COMPACT):
    for _ctx in ctxlst:
        # Collect the additions separately: mutating _ctx while iterating
        # it would break iteritems().
        _product_extras = {}
        for k,v in _ctx.iteritems():
            _product_extras[k + '_escaped'] = escape(v)
            if k.endswith('_product'):
                if v in MultiproductTestCase.PRODUCT_DATA:
                    _product_extras[k + '_name'] = MultiproductTestCase.PRODUCT_DATA[v]['name']
                else:
                    _product_extras[k + '_name'] = ''
                _product_extras[k + '_name_escaped'] = escape(_product_extras[k + '_name'])
        _ctx.update(_product_extras)
# Clean the loop temporaries out of the module namespace.
del _ctx, k, v, _product_extras
def test_suite():
    """Assemble the wiki syntax test suite for product environments.

    Three families of test cases are combined:

    1. the upstream (legacy) Trac wiki syntax tests, re-run unmodified;
    2. the same link categories rendered through the verbose
       ``product:<prefix>:`` / ``global:`` resolvers, once for each
       context dict in TEST_PRODUCT_CONTEXTS;
    3. the compact ``<prefix><delim>`` syntax, once for each context in
       TEST_PRODUCT_CONTEXTS_COMPACT.

    Each product-aware batch interpolates the context dict into the test
    case template (``%`` formatting) and forwards the context via the
    ``mpctx`` keyword so the formatter harness can set up the matching
    (product) environment.
    """
    suite = unittest.TestSuite()
    # Legacy test cases
    suite.addTest(formatter.test_suite(wikisyntax.SEARCH_TEST_CASES,
                                       file=wikisyntax.__file__))
    suite.addTest(formatter.test_suite(wikisyntax.ATTACHMENT_TEST_CASES,
                                       file=wikisyntax.__file__,
                                       context=('wiki', 'WikiStart'),
                                       setup=attachment_setup,
                                       teardown=attachment_teardown))
    suite.addTest(formatter.test_suite(wikisyntax.EMAIL_TEST_CASE_DEFAULT,
                                       file=wikisyntax.__file__,
                                       context=wikisyntax.email_default_context()))
    suite.addTest(formatter.test_suite(wikisyntax.EMAIL_TEST_CASE_NEVER_OBFUSCATE,
                                       file=wikisyntax.__file__,
                                       context=wikisyntax.email_default_context(),
                                       setup=wikisyntax.email_never_obfuscate_setup))
    # Product wiki syntax
    suite.addTest(formatter.test_suite(PRODUCT_LINKS,
                                       setup=load_products_setup(PRODUCT_PREFIXES),
                                       file=__file__))
    suite.addTests(formatter.test_suite(PRODUCT_SEARCH_LINKS % ctx,
                                        file=__file__,
                                        setup=clear_base_href_setup,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    suite.addTests(formatter.test_suite(PRODUCT_ATTACHMENT_LINKS % ctx,
                                        file=__file__,
                                        context=('wiki', 'WikiStart'),
                                        setup=link_mp_setup(attachment_setup),
                                        teardown=attachment_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    suite.addTests(formatter.test_suite(PRODUCT_TICKET_LINKS % ctx,
                                        link_mp_setup(ticket_wikisyntax.ticket_setup),
                                        __file__,
                                        # No need to invoke it anymore
                                        # ticket_wikisyntax.ticket_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    suite.addTests(formatter.test_suite(PRODUCT_TICKET_JIRA % ctx,
                                        link_mp_setup(ticket_wikisyntax.ticket_setup),
                                        __file__,
                                        # No need to invoke it anymore
                                        # ticket_wikisyntax.ticket_teardown,
                                        mpctx=ctx)
                   # only run for contexts with a non-empty path prefix
                   for ctx in TEST_PRODUCT_CONTEXTS
                   if ctx['path_prefix'])
    suite.addTests(formatter.test_suite(PRODUCT_REPORT_LINKS % ctx,
                                        link_mp_setup(ticket_wikisyntax.report_setup),
                                        __file__,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    suite.addTests(formatter.test_suite(PRODUCT_MILESTONE_LINKS % ctx,
                                        link_mp_setup(ticket_wikisyntax.milestone_setup),
                                        __file__,
                                        ticket_wikisyntax.milestone_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    # NOTE(review): this batch (and the query short-links batch below)
    # passes bare ``ticket_setup`` while the sibling batches use
    # ``ticket_wikisyntax.ticket_setup`` -- confirm both names resolve to
    # the same setup helper.
    suite.addTests(formatter.test_suite(PRODUCT_QUERY_LINKS % ctx,
                                        link_mp_setup(ticket_setup),
                                        __file__,
                                        ticket_wikisyntax.ticket_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    suite.addTests(formatter.test_suite(PRODUCT_QUERY2_LINKS % ctx,
                                        link_mp_setup(ticket_wikisyntax.query2_setup),
                                        __file__,
                                        ticket_wikisyntax.query2_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    suite.addTests(formatter.test_suite(PRODUCT_COMMENT_LINKS % ctx,
                                        clear_base_href_setup,
                                        __file__,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS)
    # Compact syntax
    suite.addTests(formatter.test_suite(PRODUCT_SEARCH_SHORTLINKS % ctx,
                                        file=__file__,
                                        setup=clear_base_href_setup,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS_COMPACT)
    suite.addTests(formatter.test_suite(PRODUCT_ATTACHMENT_SHORTLINKS % ctx,
                                        file=__file__,
                                        context=('wiki', 'WikiStart'),
                                        setup=link_mp_setup(attachment_setup),
                                        teardown=attachment_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS_COMPACT)
    suite.addTests(formatter.test_suite(PRODUCT_TICKET_SHORTLINKS % ctx,
                                        link_mp_setup(ticket_wikisyntax.ticket_setup),
                                        __file__,
                                        # No need to invoke it anymore
                                        # ticket_wikisyntax.ticket_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS_COMPACT)
    suite.addTests(formatter.test_suite(PRODUCT_REPORT_SHORTLINKS % ctx,
                                        link_mp_setup(ticket_wikisyntax.report_setup),
                                        __file__,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS_COMPACT)
    suite.addTests(formatter.test_suite(PRODUCT_MILESTONE_SHORTLINKS % ctx,
                                        link_mp_setup(ticket_wikisyntax.milestone_setup),
                                        __file__,
                                        ticket_wikisyntax.milestone_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS_COMPACT)
    suite.addTests(formatter.test_suite(PRODUCT_QUERY_SHORTLINKS % ctx,
                                        link_mp_setup(ticket_setup),
                                        __file__,
                                        ticket_wikisyntax.ticket_teardown,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS_COMPACT)
    suite.addTests(formatter.test_suite(PRODUCT_COMMENT_SHORTLINKS % ctx,
                                        clear_base_href_setup,
                                        __file__,
                                        mpctx=ctx)
                   for ctx in TEST_PRODUCT_CONTEXTS_COMPACT)
    # Unmatched expressions
    suite.addTest(formatter.test_suite(PRODUCT_NOMATCH_LINKS,
                                       file=__file__))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/upgrade_postgres.py | bloodhound_multiproduct/tests/upgrade_postgres.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Probe for a usable local Postgres server: connect to the ``test``
# database and make sure we are allowed to create (and drop) a scratch
# schema.  If anything fails -- psycopg2 missing, server down,
# insufficient privileges -- ``database_available`` is left False and
# the test case below is skipped.
try:
    import psycopg2
    import uuid
    conn = psycopg2.connect(host='localhost', database='test')
    cur = conn.cursor()
    # dashless uuid4 makes a valid, collision-free schema name
    schema = str(uuid.uuid4()).replace('-', '')
    cur.execute('CREATE SCHEMA "%s"' % schema)
    cur.execute('DROP SCHEMA "%s"' % schema)
    conn.close()
    database_available = True
except Exception as err:
    print err
    database_available = False
import upgrade
from contextlib import contextmanager
from tests import unittest
@unittest.skipUnless(database_available, "Postgres database not available.")
class PostgresEnvironmentUpgradeTestCase(upgrade.EnvironmentUpgradeTestCase):
    """Environment upgrade tests run against a scratch Postgres schema.

    Every test gets its own uniquely named schema inside the local
    ``test`` database; the schema is dropped again on tear down.
    """

    def setUp(self):
        # uuid4().hex is the canonical 32-char dashless form
        self.schema = uuid.uuid4().hex
        db_url = 'postgres://localhost/test?schema=%s' % self.schema
        super(PostgresEnvironmentUpgradeTestCase, self).setUp(
            (('trac', 'database', db_url),)
        )

    def tearDown(self):
        super(PostgresEnvironmentUpgradeTestCase, self).tearDown()
        connection = psycopg2.connect(host='localhost', database='test')
        connection.cursor().execute(
            'DROP SCHEMA "%s" CASCADE' % self.schema)
        connection.commit()
        connection.close()

    @contextmanager
    def _assert_programming_error(self, *fragments):
        """Expect a ProgrammingError whose message contains *fragments*."""
        with self.assertRaises(psycopg2.ProgrammingError) as cm:
            yield
        message = str(cm.exception)
        for fragment in fragments:
            self.assertIn(fragment, message)

    @contextmanager
    def assertFailsWithMissingTable(self):
        with self._assert_programming_error("relation", "does not exist"):
            yield

    @contextmanager
    def assertFailsWithMissingColumn(self):
        with self._assert_programming_error("column", "does not exist"):
            yield
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/web_ui.py | bloodhound_multiproduct/tests/web_ui.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's web modules"""
import sys
import unittest
from wsgiref.util import setup_testing_defaults
from trac.core import Component, implements
from trac.perm import PermissionCache, PermissionSystem
from trac.resource import ResourceNotFound
from trac.web.api import HTTPInternalError, HTTPNotFound, IRequestFilter, \
Request, RequestDone
from trac.web.href import Href
from trac.web.main import RequestDispatcher
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from multiproduct.hooks import ProductRequestWithSession
from multiproduct.model import Product
from multiproduct.web_ui import ProductModule
from tests.env import MultiproductTestCase
#----------------
# Testing infrastructure for request handlers
#----------------
class TestRequestSpy(Component):
    """Request filter used to intercept request dispatching in tests.

    Test cases overwrite the :meth:`testMatch` and :meth:`testProcessing`
    attributes with their own assertion callables; the defaults raise so
    a test that forgets to install them fails loudly.
    """

    implements(IRequestFilter)

    def testMatch(self, req, handler):
        # Replaced per-test; invoked just before the handler runs.
        raise AssertionError('Test setup error: Missing match assertions')

    def testProcessing(self, req, template, data, content_type):
        # Replaced per-test; invoked after the handler produced output.
        raise AssertionError('Test setup error: Missing processing assertions')

    # IRequestFilter methods

    def pre_process_request(self, req, handler):
        self.testMatch(req, handler)
        return handler

    def post_process_request(self, req, template, data, content_type):
        self.testProcessing(req, template, data, content_type)
        return template, data, content_type
class RequestHandlerTestCase(MultiproductTestCase):
    """Helper functions to write test cases for request handlers.

    May be used as a mixin class.
    """

    # Response data captured by the start_response callback; only
    # populated when ``record_response`` is set to True before dispatch.
    http_status = None
    http_headers = None
    http_body = None
    record_response = False

    def _get_request_obj(self, env):
        """Build a product-aware request for `env` with WSGI test defaults."""
        environ = {}
        setup_testing_defaults(environ)
        environ['SCRIPT_NAME'] = env.href()

        def start_response(status, headers):
            # Optionally capture status / headers / body for later asserts
            if self.record_response:
                self.http_status = status
                self.http_headers = dict(headers)
                self.http_body = []
                return lambda body: self.http_body.append(body)
            else:
                return lambda body: None

        req = ProductRequestWithSession(env, environ, start_response)
        return req

    def _dispatch(self, req, env):
        """Dispatch `req` through `env`'s dispatcher as ``req.authname``."""
        req.perm = PermissionCache(env, req.authname)
        return RequestDispatcher(env).dispatch(req)

    def assertHttpHeaders(self, expectedHeaders):
        """Assert each expected header was recorded with the given value."""
        for h, v in expectedHeaders.iteritems():
            self.assertTrue(h in self.http_headers,
                            "Expected HTTP header '%s' not set" % (h,))
            self.assertEquals(v, self.http_headers[h],
                              "Unexpected value for HTTP header '%s'" % (h,))

    def assertRedirect(self, req, url, permanent=False):
        """Assert the recorded response is an HTTP redirect to `url`.

        Requires ``record_response`` to have been enabled before dispatch.
        """
        if permanent:
            self.assertEquals('301 Moved Permanently', self.http_status,
                              'Unexpected status code in HTTP redirect')
        elif req.method == 'POST':
            # POST redirects use 303 so the client re-requests with GET
            self.assertEquals('303 See Other', self.http_status,
                              'Unexpected status code in HTTP redirect')
        else:
            self.assertEquals('302 Found', self.http_status,
                              'Unexpected status code in HTTP redirect')
        self.assertHttpHeaders({'Location' : url,
                                'Content-Type' : 'text/plain',
                                'Content-Length' : '0',
                                'Pragma' : 'no-cache',
                                'Cache-Control' : 'no-cache',
                                'Expires' : 'Fri, 01 Jan 1999 00:00:00 GMT'})
#----------------
# Testing product module
#----------------
class ProductModuleTestCase(RequestHandlerTestCase):
    """Tests for ProductModule request handling: the list / new / view /
    edit / delete actions served under ``/products``.
    """

    def setUp(self):
        self._mp_setup()
        self.global_env = self.env
        self.env = ProductEnvironment(self.global_env, self.default_product)

        # The spy is installed in both scopes; its match callback is bound
        # here once, the processing callback per test.
        self.global_env.enable_component(TestRequestSpy)
        self.env.enable_component(TestRequestSpy)
        TestRequestSpy(self.global_env).testMatch = self._assert_product_match
        PermissionSystem(self.global_env).grant_permission('testuser', 'PRODUCT_CREATE')
        PermissionSystem(self.global_env).grant_permission('testuser', 'PRODUCT_VIEW')
        PermissionSystem(self.global_env).grant_permission('testuser', 'PRODUCT_MODIFY')

    def tearDown(self):
        self.global_env.reset_db()
        self.env = self.global_env = None

    # Expected routing results checked by _assert_product_match; set by
    # each test before dispatching.
    expectedPrefix = None
    expectedPathInfo = None

    def _assert_product_match(self, req, handler):
        """Installed as TestRequestSpy.testMatch: verify URL routing."""
        self.assertIs(ProductModule(self.global_env), handler)
        self.assertEqual(self.expectedPrefix, req.args['productid'],
                         "Unexpected product prefix")
        self.assertEqual(self.expectedPathInfo, req.args['pathinfo'],
                         "Unexpected sub path")

    def test_product_list(self):
        """GET /products renders the product list template."""
        spy = self.global_env[TestRequestSpy]
        self.assertIsNot(None, spy)
        req = self._get_request_obj(self.global_env)
        req.authname = 'testuser'
        req.environ['PATH_INFO'] = '/products'
        mps = MultiProductSystem(self.global_env)

        def assert_product_list(req, template, data, content_type):
            self.assertEquals('product_list.html', template)
            self.assertIs(None, content_type)
            self.assertEquals([mps.default_product_prefix,
                               self.default_product],
                              [p.prefix for p in data.get('products')])
            self.assertTrue('context' in data)
            ctx = data['context']
            self.assertEquals('product', ctx.resource.realm)
            self.assertEquals(None, ctx.resource.id)

        spy.testProcessing = assert_product_list
        with self.assertRaises(RequestDone):
            self._dispatch(req, self.global_env)

    def test_product_new(self):
        """GET /products?action=new renders the edit form for a new product."""
        spy = self.global_env[TestRequestSpy]
        self.assertIsNot(None, spy)
        req = self._get_request_obj(self.global_env)
        req.authname = 'testuser'
        req.environ['PATH_INFO'] = '/products'
        req.environ['QUERY_STRING'] = 'action=new'

        def assert_product_new(req, template, data, content_type):
            self.assertEquals('product_edit.html', template)
            self.assertIs(None, content_type)
            self.assertFalse('products' in data)
            self.assertTrue('context' in data)
            ctx = data['context']
            self.assertEquals('product', ctx.resource.realm)
            self.assertEquals(None, ctx.resource.id)

        spy.testProcessing = assert_product_new
        with self.assertRaises(RequestDone):
            self._dispatch(req, self.global_env)

    def test_product_view(self):
        """GET /products/<prefix> renders the product view template."""
        spy = self.global_env[TestRequestSpy]
        self.assertIsNot(None, spy)

        def assert_product_view(req, template, data, content_type):
            self.assertEquals('product_view.html', template)
            self.assertIs(None, content_type)
            self.assertFalse('products' in data)
            self.assertTrue('context' in data)
            ctx = data['context']
            self.assertEquals('product', ctx.resource.realm)
            self.assertEquals(real_prefix, ctx.resource.id)
            self.assertTrue('product' in data)
            self.assertEquals(real_prefix, data['product'].prefix)

        spy.testProcessing = assert_product_view
        # Existing product
        req = self._get_request_obj(self.global_env)
        req.authname = 'testuser'
        req.environ['PATH_INFO'] = '/products/%s' % (self.default_product,)
        real_prefix = self.default_product
        self.expectedPrefix = self.default_product
        self.expectedPathInfo = ''
        with self.assertRaises(RequestDone):
            self._dispatch(req, self.global_env)

    def test_missing_product(self):
        """An unknown prefix falls back to the list view plus a warning."""
        spy = self.global_env[TestRequestSpy]
        self.assertIsNot(None, spy)
        mps = MultiProductSystem(self.global_env)

        def assert_product_list(req, template, data, content_type):
            self.assertEquals('product_list.html', template)
            self.assertIs(None, content_type)
            self.assertEquals([mps.default_product_prefix,
                               self.default_product],
                              [p.prefix for p in data.get('products')])
            self.assertTrue('context' in data)
            ctx = data['context']
            self.assertEquals('product', ctx.resource.realm)
            self.assertEquals(None, ctx.resource.id)

        spy.testProcessing = assert_product_list
        # Missing product
        req = self._get_request_obj(self.global_env)
        req.authname = 'testuser'
        req.environ['PATH_INFO'] = '/products/missing'
        self.expectedPrefix = 'missing'
        self.expectedPathInfo = ''
        with self.assertRaises(RequestDone):
            self._dispatch(req, self.global_env)
        self.assertEqual(1, len(req.chrome['warnings']))
        self.assertEqual('Product missing not found',
                         req.chrome['warnings'][0].unescape())

    def test_product_edit(self):
        """GET shows the edit form; POST saves and redirects to the view."""
        spy = self.global_env[TestRequestSpy]
        self.assertIsNot(None, spy)
        # HTTP GET
        req = self._get_request_obj(self.global_env)
        req.authname = 'testuser'
        req.environ['PATH_INFO'] = '/products/%s' % (self.default_product,)
        req.environ['QUERY_STRING'] = 'action=edit'
        real_prefix = self.default_product

        def assert_product_edit(req, template, data, content_type):
            self.assertEquals('product_edit.html', template)
            self.assertIs(None, content_type)
            self.assertFalse('products' in data)
            self.assertTrue('context' in data)
            ctx = data['context']
            self.assertEquals('product', ctx.resource.realm)
            self.assertEquals(real_prefix, ctx.resource.id)
            self.assertTrue('product' in data)
            self.assertEquals(real_prefix, data['product'].prefix)

        spy.testProcessing = assert_product_edit
        self.expectedPrefix = self.default_product
        self.expectedPathInfo = ''
        with self.assertRaises(RequestDone):
            self._dispatch(req, self.global_env)
        # HTTP POST
        req = self._get_request_obj(self.global_env)
        req.authname = 'testuser'
        req.environ['REQUEST_METHOD'] = 'POST'
        req.environ['PATH_INFO'] = '/products/%s' % (self.default_product,)
        req.args = dict(action='edit', description='New description',
                        prefix=self.default_product,
                        name=self.env.product.name)
        spy.testProcessing = assert_product_edit
        self.expectedPrefix = self.default_product
        self.expectedPathInfo = ''
        # Capture the response so the redirect can be asserted below
        self.record_response = True
        with self.assertRaises(RequestDone):
            self._dispatch(req, self.global_env)
        try:
            product = Product(self.global_env,
                              {'prefix' : self.env.product.prefix})
        except ResourceNotFound:
            self.fail('Default test product deleted ?')
        else:
            self.assertEquals('New description', product.description)
        product_url = Href(req.base_path).products(self.default_product)
        self.assertRedirect(req, product_url)

    def test_product_delete(self):
        """Product removal is rejected with an internal error."""
        spy = self.global_env[TestRequestSpy]
        self.assertIsNot(None, spy)
        req = self._get_request_obj(self.global_env)
        req.authname = 'testuser'
        req.environ['PATH_INFO'] = '/products/%s' % (self.default_product,)
        req.environ['QUERY_STRING'] = 'action=delete'
        self.expectedPrefix = self.default_product
        self.expectedPathInfo = ''
        spy.testProcessing = lambda *args, **kwargs: None
        with self.assertRaises(HTTPInternalError) as test_cm:
            self._dispatch(req, self.global_env)
        self.assertEqual('500 Trac Error (Product removal is not allowed!)',
                         unicode(test_cm.exception))
def test_suite():
    """Return a suite with every ``test*`` method of ProductModuleTestCase."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductModuleTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/model.py | bloodhound_multiproduct/tests/model.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for multiproduct/model.py"""
import shutil
import tempfile
from tests import unittest
from trac.core import TracError
from trac.test import EnvironmentStub
from trac.tests.resource import TestResourceChangeListener
from trac.ticket.model import Ticket
from bhdashboard.model import ModelBase
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from multiproduct.model import Product
class ProductTestCase(unittest.TestCase):
    """Unit tests covering the Product model."""

    INITIAL_PREFIX = 'tp'
    INITIAL_NAME = 'test project'
    INITIAL_DESCRIPTION = 'a test project'

    def setUp(self):
        self.env = EnvironmentStub(enable=['trac.*', 'multiproduct.*'])
        self.env.path = tempfile.mkdtemp(prefix='bh-product-tempenv-')
        self.mpsystem = MultiProductSystem(self.env)
        try:
            self.mpsystem.upgrade_environment(self.env.db_transaction)
        except self.env.db_exc.OperationalError:
            # table remains but database version is deleted
            pass
        self.listener = self._enable_resource_change_listener()
        self.default_data = {'prefix': self.INITIAL_PREFIX,
                             'name': self.INITIAL_NAME,
                             'description': self.INITIAL_DESCRIPTION}
        self.global_env = self.env
        self.product = Product(self.env)
        self.product._data.update(self.default_data)
        self.product.insert()

    def tearDown(self):
        # NOTE(review): test_get_tickets rebinds self.env to a
        # ProductEnvironment; this relies on ``path`` / ``reset_db``
        # being delegated to the global environment -- confirm.
        shutil.rmtree(self.env.path)
        self.env.reset_db()

    def _enable_resource_change_listener(self):
        """Install a listener recording Product change notifications."""
        listener = TestResourceChangeListener(self.env)
        listener.resource_type = Product
        listener.callback = self.listener_callback
        return listener

    def listener_callback(self, action, resource, context, old_values=None):
        """Record the notified product's fields for later assertions."""
        # pylint: disable=unused-argument
        # pylint: disable=attribute-defined-outside-init
        self.prefix = resource.prefix
        self.name = resource.name
        self.description = resource.description

    def test_set_table_field(self):
        """tests that table.field style update works"""
        test = {'prefix': 'td',
                'name': 'test field access',
                'description': 'product to test field setting'}
        product = Product(self.env)
        # attempt to set the fields from the data
        product.prefix = test['prefix']
        product.name = test['name']
        product.description = test['description']
        self.assertEqual(product._data['prefix'], test['prefix'])
        self.assertEqual(product._data['name'], test['name'])
        self.assertEqual(product._data['description'], test['description'])

    def test_select(self):
        """tests that select can search Products by fields"""
        p2_data = {'prefix': 'tp2',
                   'name': 'test project 2',
                   'description': 'a different test project'}
        p3_data = {'prefix': 'tp3',
                   'name': 'test project 3',
                   'description': 'test project'}
        product2 = Product(self.env)
        product2._data.update(p2_data)
        product3 = Product(self.env)
        product3._data.update(p3_data)
        product2.insert()
        product3.insert()

        products = list(Product.select(self.env, where={'prefix': 'tp'}))
        self.assertEqual(1, len(products))
        products = list(Product.select(self.env,
                                       where={'name': 'test project'}))
        self.assertEqual(1, len(products))
        products = list(Product.select(self.env,
                                       where={'prefix': 'tp3',
                                              'name': 'test project 3'}))
        self.assertEqual(1, len(products))

    def test_update(self):
        """tests that we can use update to push data to the database"""
        product = list(Product.select(self.env, where={'prefix': 'tp'}))[0]
        self.assertEqual('test project', product._data['name'])
        new_data = {'prefix': 'tp',
                    'name': 'updated',
                    'description': 'nothing'}
        product._data.update(new_data)
        product.update()
        comp_product = list(Product.select(self.env,
                                           where={'prefix': 'tp'}))[0]
        self.assertEqual('updated', comp_product._data['name'])

    def test_update_key_change(self):
        """tests that we raise an error for attempting to update key fields"""
        bad_data = {'prefix': 'tp0',
                    'name': 'update',
                    'description': 'nothing'}
        product = list(Product.select(self.env, where={'prefix': 'tp'}))[0]
        product._data.update(bad_data)
        self.assertRaises(TracError, product.update)

    def test_insert(self):
        """test saving new Product"""
        data = {'prefix': 'new', 'name': 'new', 'description': 'new'}
        product = Product(self.env)
        product._data.update(data)
        product.insert()
        check_products = list(Product.select(self.env,
                                             where={'prefix': 'new'}))
        self.assertEqual(product._data['prefix'],
                         check_products[0]._data['prefix'])
        self.assertEqual(1, len(check_products))

    def test_insert_duplicate_key(self):
        """test attempted saving of Product with existing key fails"""
        dupe_key_data = {'prefix': 'tp',
                         'name': 'dupe',
                         'description': 'dupe primary key'}
        product2 = Product(self.env)
        product2._data.update(dupe_key_data)
        self.assertRaises(TracError, product2.insert)

    def test_delete(self):
        """test that we are able to delete Products"""
        product = list(Product.select(self.env, where={'prefix': 'tp'}))[0]
        product.delete()
        post = list(Product.select(self.env, where={'prefix': 'tp'}))
        self.assertEqual(0, len(post))

    def test_delete_twice(self):
        """test that we error when deleting twice on the same key"""
        product = list(Product.select(self.env, where={'prefix': 'tp'}))[0]
        product.delete()
        self.assertRaises(TracError, product.delete)

    def test_field_data_get(self):
        """tests that we can use table.field syntax to get to the field data"""
        prefix = self.default_data['prefix']
        name = self.default_data['name']
        description = self.default_data['description']
        product = list(Product.select(self.env, where={'prefix': prefix}))[0]
        self.assertEqual(prefix, product.prefix)
        self.assertEqual(name, product.name)
        self.assertEqual(description, product.description)

    def test_field_set(self):
        """tests that we can use table.field = something to set field data"""
        prefix = self.default_data['prefix']
        product = list(Product.select(self.env, where={'prefix': prefix}))[0]
        new_description = 'test change of description'
        product.description = new_description
        self.assertEqual(new_description, product.description)

    def test_missing_unique_fields(self):
        """ensure that the insert method works when _meta does not specify
        unique fields when inserting more than one ProductResourceMap
        instances
        """
        class TestModel(ModelBase):
            """A test model with no unique_fields"""
            _meta = {'table_name': 'bloodhound_testmodel',
                     'object_name': 'TestModelObject',
                     'key_fields': ['id', ],
                     'non_key_fields': ['value'],
                     'unique_fields': [], }

        from trac.db import DatabaseManager
        # Create the model's table directly via the DB connector
        schema = [TestModel._get_schema(), ]
        with self.env.db_transaction as db:
            db_connector, dummy = DatabaseManager(self.env)._get_connector()
            for table in schema:
                for statement in db_connector.to_sql(table):
                    db(statement)

        # Inserting two rows must not trip over the empty unique_fields
        tm1 = TestModel(self.env)
        tm1._data.update({'id': 1, 'value': 'value1'})
        tm1.insert()
        tm2 = TestModel(self.env)
        tm2._data.update({'id': 2, 'value': 'value2'})
        tm2.insert()

    def test_change_listener_created(self):
        """The listener is notified about the product created in setUp."""
        self.assertEqual('created', self.listener.action)
        self.assertIsInstance(self.listener.resource, Product)
        self.assertEqual(self.INITIAL_PREFIX, self.prefix)
        self.assertEqual(self.INITIAL_NAME, self.name)
        self.assertEqual(self.INITIAL_DESCRIPTION, self.description)

    def test_change_listener_changed(self):
        """The listener sees updates together with the old field values."""
        CHANGED_NAME = "changed name"
        self.product.name = CHANGED_NAME
        self.product.update()
        self.assertEqual('changed', self.listener.action)
        self.assertIsInstance(self.listener.resource, Product)
        self.assertEqual(CHANGED_NAME, self.name)
        self.assertEqual({"name": self.INITIAL_NAME},
                         self.listener.old_values)

    def test_change_listener_deleted(self):
        """The listener is notified about product deletion."""
        self.product.delete()
        self.assertEqual('deleted', self.listener.action)
        self.assertIsInstance(self.listener.resource, Product)
        self.assertEqual(self.INITIAL_PREFIX, self.prefix)

    def test_get_tickets(self):
        """Product.get_tickets returns the same set from either scope."""
        num_tickets = 5
        for pdata in (
                {'prefix': 'p2', 'name': 'product, too', 'description': ''},
                {'prefix': 'p3', 'name': 'strike three', 'description': ''},
                ):
            product = Product(self.global_env)
            product._data.update(pdata)
            product.insert()
            # Switch the test's working env to the new product's scope
            self.env = ProductEnvironment(self.global_env, product)
            for i in range(num_tickets):
                ticket = Ticket(self.env)
                ticket['summary'] = 'hello ticket #%s-%d' % (product.prefix, i)
                ticket['reporter'] = 'admin'
                ticket.insert()

            # retrieve tickets using both global and product scope
            tickets_from_global = [(t['product'], t['id']) for t in
                Product.get_tickets(self.global_env, product.prefix)]
            self.assertEqual(len(tickets_from_global), num_tickets)

            tickets_from_product = [(t['product'], t['id']) for t in
                Product.get_tickets(self.env)]
            self.assertEqual(len(tickets_from_product), num_tickets)

            # both lists should contain same elements
            intersection = set(tickets_from_global) & set(tickets_from_product)
            self.assertEqual(len(intersection), num_tickets)
def suite():
    """Return a suite with every ``test*`` method of ProductTestCase."""
    return unittest.TestSuite([unittest.makeSuite(ProductTestCase, 'test')])


if __name__ == '__main__':
    unittest.main()
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/attachment.py | bloodhound_multiproduct/tests/attachment.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's attachments in product environments"""
import shutil
import os.path
import unittest
from trac.attachment import Attachment
from trac.test import EnvironmentStub
from trac.tests.attachment import AttachmentTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductAttachmentTestCase(AttachmentTestCase, MultiproductTestCase):
    """Runs the stock attachment tests inside a product environment and
    checks that attachment storage paths are isolated per product."""

    def setUp(self):
        try:
            AttachmentTestCase.setUp(self)
        except:
            # Parent setUp failed half-way: make tearDown usable, clean
            # up whatever was created, then let the error propagate.
            self.global_env = self.env
            self.tearDown()
            raise
        self.global_env = global_env = self.env
        self._upgrade_mp(global_env)
        self._setup_test_log(global_env)
        self._load_product_from_data(global_env, self.default_product)
        self.env = ProductEnvironment(global_env, self.default_product)
        # Root folder for the default product environment
        self.attachments_dir = os.path.join(
            self.global_env.path, 'products', self.default_product,
            'files', 'attachments')

    def tearDown(self):
        if os.path.exists(self.global_env.path):
            shutil.rmtree(self.global_env.path)
        self.env.reset_db()

    def test_product_path_isolation(self):
        # The same attachment must map to different files in the global
        # and the product environment.
        in_product = Attachment(self.env, 'ticket', '42')
        in_global = Attachment(self.global_env, 'ticket', '42')
        in_global.filename = in_product.filename = 'foo.txt'
        self.assertNotEqual(in_product.path, in_global.path)
def test_suite():
    """Return a suite with every ``test*`` method of the case above."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductAttachmentTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/core.py | bloodhound_multiproduct/tests/core.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests import unittest
from trac.core import Interface, implements, Component
from multiproduct.core import MultiProductExtensionPoint
class MultiProductExtensionPointTestCase(unittest.TestCase):
    """Tests MultiProductExtensionPoint against the three component
    manager flavours: plain Trac, global product scope and a
    product-scoped manager (one with a ``parent``).
    """

    def setUp(self):
        from trac.core import ComponentManager, ComponentMeta
        self.compmgr = ComponentManager()

        # Make sure we have no external components hanging around in the
        # component registry
        self.old_registry = ComponentMeta._registry
        ComponentMeta._registry = {}

    def tearDown(self):
        # Restore the original component registry
        from trac.core import ComponentMeta
        ComponentMeta._registry = self.old_registry

    def test_with_trac_component_manager(self):
        """No parent attribute, no _all_product_envs method"""
        class ComponentA(Component):
            implements(ITest)

        class ComponentB(Component):
            mp_extension_point = MultiProductExtensionPoint(ITest)

        components = ComponentB(self.compmgr).mp_extension_point
        # Plain Trac manager: a single hit, from the local registry
        self.assertEqual(len(components), 1)
        for c in components:
            self.assertIsInstance(c, ComponentA)

    def test_with_global_product_component_manager(self):
        # Global scope: ``parent`` is None and two product envs are
        # reported, so 3 hits are expected (local + one per product env)
        self.compmgr.parent = None
        self.compmgr.all_product_envs = lambda: [self.compmgr, self.compmgr]

        class ComponentA(Component):
            implements(ITest)

        class ComponentB(Component):
            mp_extension_point = MultiProductExtensionPoint(ITest)

        components = ComponentB(self.compmgr).mp_extension_point
        self.assertEqual(len(components), 3)
        for c in components:
            self.assertIsInstance(c, ComponentA)

    def test_with_product_component_manager(self):
        # Product scope: ``parent`` is set, so only the local environment
        # is consulted even though product envs are available
        self.compmgr.parent = self
        self.compmgr.all_product_envs = lambda: [self.compmgr, self.compmgr]

        class ComponentA(Component):
            implements(ITest)

        class ComponentB(Component):
            mp_extension_point = MultiProductExtensionPoint(ITest)

        components = ComponentB(self.compmgr).mp_extension_point
        self.assertEqual(len(components), 1)
        for c in components:
            self.assertIsInstance(c, ComponentA)
class ITest(Interface):
    """Minimal extension-point interface used as a fixture by the tests
    above."""
    def test():
        """Dummy function."""
def test_suite():
    """Assemble the extension-point tests of this module into a suite."""
    suite = unittest.TestSuite()
    suite.addTest(
        unittest.makeSuite(MultiProductExtensionPointTestCase, 'test'))
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/resource.py | bloodhound_multiproduct/tests/resource.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os.path
import shutil
import tempfile
import unittest
from StringIO import StringIO
from datetime import datetime
import trac.ticket.api
import trac.ticket.report
import trac.ticket.roadmap
import trac.wiki.api
from trac import resource
from trac.attachment import Attachment
from trac.ticket.model import Ticket
from trac.util.datefmt import utc
from trac.wiki.model import WikiPage
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductResourceTestCase(MultiproductTestCase):
    """Base fixture: a global environment plus two product environments
    (the default product and product u'xü'), both loaded with default data.
    """
    def setUp(self):
        self._mp_setup()
        # Keep a handle on the global env; self.env becomes product-scoped
        self.global_env = self.env
        self._load_product_from_data(self.global_env, u'xü')
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self.env1 = ProductEnvironment(self.global_env, u'xü')
        self._load_default_data(self.global_env)
        self._load_default_data(self.env1)
        # Enable product system component in product context
        self.env.enable_component(MultiProductSystem)
    def tearDown(self):
        # Drop all database state created by the fixtures
        self.global_env.reset_db()
        self.global_env = self.env = None
class ProductAttachmentResourceTestCase(ProductResourceTestCase):
    """Attachment resources rendered in global and product neighborhoods."""
    def setUp(self):
        ProductResourceTestCase.setUp(self)
        # Use a well-known scratch folder for attachment storage
        self.global_env.path = os.path.join(tempfile.gettempdir(),
                                            'trac-tempenv')
        if os.path.exists(self.global_env.path):
            shutil.rmtree(self.global_env.path)
        os.mkdir(self.global_env.path)
        # Two attachments with the same file name: one in the global scope,
        # one in product u'xü'; their descriptions tell them apart below.
        attachment = Attachment(self.global_env, 'ticket', 1)
        attachment.description = 'Global Bar'
        attachment.insert('foo.txt', StringIO(''), 0)
        attachment = Attachment(self.env1, 'ticket', 1)
        attachment.description = 'Product Bar'
        attachment.insert('foo.txt', StringIO(''), 0)
        self.resource = resource.Resource('ticket',
                                          1).child('attachment', 'foo.txt')
    def tearDown(self):
        shutil.rmtree(self.global_env.path)
        ProductResourceTestCase.tearDown(self)
    def test_global_neighborhood_attachments(self):
        # Resolving in the 'global' neighborhood must pick the attachment
        # inserted into the global environment ('Global Bar').
        target = resource.Neighborhood('global', None).child(self.resource)
        self.assertEquals("[global:] Attachment 'foo.txt' in [global:] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("[global:] Attachment 'foo.txt' in [global:] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("[global:] foo.txt ([global:] Ticket #1)",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('Global Bar',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/attachment/ticket/1/foo.txt',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
    def test_product_neighborhood_attachments(self):
        # The 'product' neighborhood resolves against product u'xü' instead
        # ('Product Bar'); note the /products/x%C3%BC URL prefix.
        target = resource.Neighborhood('product', u'xü').child(self.resource)
        self.assertEquals(u"[product:xü] Attachment 'foo.txt' in [product:xü] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"[product:xü] Attachment 'foo.txt' in [product:xü] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"[product:xü] foo.txt ([product:xü] Ticket #1)",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('Product Bar',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/attachment/ticket/1/foo.txt',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class ProductMilestoneResourceTestCase(ProductResourceTestCase):
    """Milestone resources rendered in global and product neighborhoods."""
    resource = resource.Resource('milestone', 'milestone1')
    def _assert_renderings(self, target, description, name, shortname,
                           summary, url):
        # Compare every resource rendering helper with its expected value.
        self.assertEquals(description,
                          resource.get_resource_description(self.env, target))
        self.assertEquals(name,
                          resource.get_resource_name(self.env, target))
        self.assertEquals(shortname,
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(summary,
                          resource.get_resource_summary(self.env, target))
        self.assertEquals(url,
                          resource.get_resource_url(self.env, target,
                                                    self.env.href))
    def test_global_neighborhood_milestone(self):
        target = resource.Neighborhood('global', None).child(self.resource)
        self._assert_renderings(
            target,
            "[global:] Milestone milestone1",
            "[global:] Milestone milestone1",
            "milestone1",
            "[global:] Milestone milestone1",
            'http://example.org/trac.cgi/milestone/milestone1')
    def test_product_neighborhood_milestone(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)
        self._assert_renderings(
            target,
            u"[product:xü] Milestone milestone1",
            u"[product:xü] Milestone milestone1",
            u"milestone1",
            u"[product:xü] Milestone milestone1",
            'http://example.org/trac.cgi/products/x%C3%BC/milestone/milestone1')
# FIXME: No resource manager for reports in core ?
class ProductReportResourceTestCase(ProductResourceTestCase):
    """Report resources rendered in global and product neighborhoods."""
    resource = resource.Resource('report', 1)
    def _check_report(self, target, description, name, shortname, summary,
                      url):
        # Compare each resource rendering helper with its expected value.
        self.assertEquals(description,
                          resource.get_resource_description(self.env, target))
        self.assertEquals(name,
                          resource.get_resource_name(self.env, target))
        self.assertEquals(shortname,
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(summary,
                          resource.get_resource_summary(self.env, target))
        self.assertEquals(url,
                          resource.get_resource_url(self.env, target,
                                                    self.env.href))
    def test_global_neighborhood_report(self):
        target = resource.Neighborhood('global', None).child(self.resource)
        self._check_report(target,
                           "[global:] report:1",
                           "[global:] report:1",
                           "[global:] report:1",
                           '[global:] report:1 at version None',
                           'http://example.org/trac.cgi/report/1')
    def test_product_neighborhood_report(self):
        target = resource.Neighborhood('product', u'xü').child(self.resource)
        self._check_report(target,
                           u"[product:xü] report:1",
                           u"[product:xü] report:1",
                           u"[product:xü] report:1",
                           u"[product:xü] report:1 at version None",
                           'http://example.org/trac.cgi/products/x%C3%BC/report/1')
class ProductTicketResourceTestCase(ProductResourceTestCase):
    """Ticket resources rendered in global and product neighborhoods."""
    def _new_ticket(self, env, ticket_dict):
        # Insert a ticket built from `ticket_dict` into `env`; returns its id
        ticket = Ticket(env)
        ticket.populate(ticket_dict)
        return ticket.insert()
    def setUp(self):
        ProductResourceTestCase.setUp(self)
    def test_global_neighborhood_ticket(self):
        # A ticket created in the global environment resolves through the
        # 'global' neighborhood.
        nbh = resource.Neighborhood('global', None)
        data = dict(summary='Ticket summary', description='Ticket description',
                    type='enhancement', status='new')
        target = nbh.child('ticket', self._new_ticket(self.global_env, data))
        self.assertEquals("[global:] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("[global:] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("[global:] #1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals('enhancement: Ticket summary (new)',
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/ticket/1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
    def test_product_neighborhood_ticket(self):
        # Same checks against a ticket stored in product u'xü'
        nbh = resource.Neighborhood('product', u'xü')
        data = dict(summary='Ticket summary', description='Ticket description',
                    type='task', status='accepted')
        target = nbh.child('ticket', self._new_ticket(self.env1, data))
        self.assertEquals(u"[product:xü] Ticket #1",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"[product:xü] Ticket #1",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"[product:xü] #1",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(u"task: Ticket summary (accepted)",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/ticket/1',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class ProductWikiResourceTestCase(ProductResourceTestCase):
    """Wiki page resources rendered in global and product neighborhoods."""
    # version=2 pins the second revision saved in setUp()
    resource = resource.Resource('wiki', 'TestPage', version=2)
    def setUp(self):
        ProductResourceTestCase.setUp(self)
        # Two revisions of TestPage in the global environment ...
        page = WikiPage(self.global_env)
        page.name = 'TestPage'
        page.text = 'Bla bla'
        t = datetime(2001, 1, 1, 1, 1, 1, 0, utc)
        page.save('joe', 'Testing global', '::1', t)
        page.text = 'Bla bla bla'
        t = datetime(2002, 2, 2, 2, 2, 2, 0, utc)
        page.save('joe', 'Testing global 2', '::1', t)
        # ... and two more revisions of TestPage in product u'xü'
        page = WikiPage(self.env1)
        page.name = 'TestPage'
        page.text = 'alb alB'
        t = datetime(2011, 1, 1, 1, 1, 1, 0, utc)
        page.save('mary', 'Testing product', '::1', t)
        page.text = 'Bla bla bla'
        t = datetime(2012, 2, 2, 2, 2, 2, 0, utc)
        page.save('mary', 'Testing product 2', '::1', t)
    def test_global_neighborhood_wiki(self):
        target = resource.Neighborhood('global', None).child(self.resource)
        self.assertEquals("TestPage",
                          resource.get_resource_description(self.env, target))
        self.assertEquals("TestPage",
                          resource.get_resource_name(self.env, target))
        self.assertEquals("TestPage",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals("TestPage",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/wiki/TestPage?version=2',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
    def test_product_neighborhood_wiki(self):
        # Only the URL differs for wiki pages: product pages live under the
        # /products/<prefix> URL namespace.
        target = resource.Neighborhood('product', u'xü').child(self.resource)
        self.assertEquals(u"TestPage",
                          resource.get_resource_description(self.env, target))
        self.assertEquals(u"TestPage",
                          resource.get_resource_name(self.env, target))
        self.assertEquals(u"TestPage",
                          resource.get_resource_shortname(self.env, target))
        self.assertEquals(u"TestPage",
                          resource.get_resource_summary(self.env, target))
        self.assertEquals('http://example.org/trac.cgi/products/x%C3%BC/wiki/TestPage?version=2',
                          resource.get_resource_url(self.env,
                                                    target, self.env.href))
class NeighborhoodTestCase(MultiproductTestCase):
    """Checks registration of resource neighborhoods."""
    def setUp(self):
        self._mp_setup()
    def test_get_known_neighborhoods(self):
        # Both the global scope and the product scope must be advertised
        resource_system = resource.ResourceSystem(self.env)
        neighborhoods = sorted(resource_system.get_known_neighborhoods())
        self.assertEquals(['global', 'product'], neighborhoods)
def test_suite():
    """Collect every resource-related test case of this module."""
    suite = unittest.TestSuite()
    for testcase in (ProductAttachmentResourceTestCase,
                     ProductMilestoneResourceTestCase,
                     ProductReportResourceTestCase,
                     ProductTicketResourceTestCase,
                     ProductWikiResourceTestCase,
                     NeighborhoodTestCase):
        suite.addTest(unittest.makeSuite(testcase, 'test'))
    return suite

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/config.py | bloodhound_multiproduct/tests/config.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's product configuration objects"""
import os.path
import shutil
import time
import unittest
from ConfigParser import ConfigParser
from StringIO import StringIO
from itertools import groupby
from trac.config import Option
from trac.tests.config import ConfigurationTestCase
from trac.util.text import to_unicode
from multiproduct.api import MultiProductSystem
from multiproduct.config import Configuration
from multiproduct.model import Product, ProductSetting
from tests.env import MultiproductTestCase
class MultiproductConfigTestCase(MultiproductTestCase):
    r"""Test setup for configuration test cases.

    Builds a product environment whose configuration is backed by the
    product settings table (`ProductSetting`) rather than a ``trac.ini``
    file on disk.
    """
    def setUp(self):
        r"""Replace Trac environment with product environment
        """
        self.env = self._setup_test_env()
        # Dummy config file, a sibling of trac.ini
        tmpdir = os.path.realpath(self.env.path)
        self.filename = os.path.join(tmpdir, 'conf', 'product.ini')
        # Ensure conf sub-folder is created.
        # BUGFIX: the previous code only evaluated
        # `os.path.dirname(self.filename)` and discarded the result, so the
        # folder was never actually created; _test_with_inherit() then
        # failed to write trac-site.ini on a pristine environment.
        conf_dir = os.path.dirname(self.filename)
        if not os.path.isdir(conf_dir):
            os.makedirs(conf_dir)
        self._upgrade_mp(self.env)
        self._setup_test_log(self.env)
        self._load_product_from_data(self.env, self.default_product)
        # Isolate option registrations performed by individual test cases
        self._orig_registry = Option.registry
        Option.registry = {}
    def tearDown(self):
        # Undo global side effects: option registry and environment folder
        Option.registry = self._orig_registry
        shutil.rmtree(self.env.path)
        self.env = None
    def _read(self, parents=None, product=None):
        r"""Override superclass method by returning product-aware configuration
        object retrieving settings from the database. Such objects will replace
        instances of `trac.config.Configuration` used in inherited test cases.
        """
        if product is None:
            product = self.default_product
        return Configuration(self.env, product, parents)
    def _write(self, lines, product=None):
        r"""Override superclass method by writing configuration values
        to the database rather than ini file in the filesystem.
        """
        if product is None:
            product = self.default_product
        product = to_unicode(product)
        # Parse the ini-style lines, then replay them into the database
        fp = StringIO(('\n'.join(lines + [''])).encode('utf-8'))
        parser = ConfigParser()
        parser.readfp(fp, 'bh-product-test')
        with self.env.db_transaction as db:
            # Delete existing settings for target product, if any
            for setting in ProductSetting.select(self.env, db,
                                                 {'product': product}):
                setting.delete()
            # Insert new options
            for section in parser.sections():
                option_key = dict(
                    section=to_unicode(section),
                    product=to_unicode(product)
                )
                for option, value in parser.items(section):
                    option_key.update(dict(option=to_unicode(option)))
                    setting = ProductSetting(self.env)
                    setting._data.update(option_key)
                    setting._data['value'] = to_unicode(value)
                    setting.insert()
    def _test_with_inherit(self, testcb):
        """Almost exact copy of `trac.tests.config.ConfigurationTestCase`.
        Differences explained in inline comments.
        """
        # Parent configuration file created in environment's conf sub-folder
        # PS: This modification would not be necessary if the corresponding
        # statement in overriden method would be written the same way
        # but the fact that both files have the same parent folder
        # is not made obvious in there
        sitename = os.path.join(os.path.dirname(self.filename), 'trac-site.ini')
        try:
            with open(sitename, 'w') as sitefile:
                sitefile.write('[a]\noption = x\n')
            self._write(['[inherit]', 'file = trac-site.ini'])
            testcb()
        finally:
            os.remove(sitename)
    def _dump_settings(self, config):
        """Return the stored settings of `config`'s product rendered as
        ini-style lines, sorted by section and option.
        """
        product = config.product
        fields = ('section', 'option', 'value')
        rows = [tuple(getattr(s, f, None) for f in fields) for s in
                ProductSetting.select(config.env, where={'product': product})]
        dump = []
        for section, group in groupby(sorted(rows), lambda row: row[0]):
            dump.append('[%s]\n' % (section,))
            for row in group:
                dump.append('%s = %s\n' % (row[1], row[2]))
        return dump
class ProductConfigTestCase(MultiproductConfigTestCase, ConfigurationTestCase):
    r"""Test cases for Trac configuration objects rewritten for product
    scope.
    """
    # Test cases rewritten to avoid reading the config file from disk;
    # that makes sense for product config as it is stored in the database.
    def test_set_and_save(self):
        config = self._read()
        # Mix of byte-string and unicode section/option names with
        # non-ASCII characters to exercise encoding handling
        config.set('b', u'öption0', 'y')
        config.set(u'aä', 'öption0', 'x')
        config.set('aä', 'option2', "Voilà l'été")  # UTF-8
        config.set(u'aä', 'option1', u"Voilà l'été")  # unicode
        # Note: the following would depend on the locale.getpreferredencoding()
        # config.set('a', 'option3', "Voil\xe0 l'\xe9t\xe9") # latin-1
        self.assertEquals('x', config.get(u'aä', u'öption0'))
        self.assertEquals(u"Voilà l'été", config.get(u'aä', 'option1'))
        self.assertEquals(u"Voilà l'été", config.get(u'aä', 'option2'))
        config.save()
        # Saved settings must round-trip through the database unchanged
        dump = self._dump_settings(config)
        self.assertEquals([
                           u'[aä]\n',
                           u"option1 = Voilà l'été\n",
                           u"option2 = Voilà l'été\n",
                           u'öption0 = x\n',
                           # u"option3 = Voilà l'été\n",
                           u'[b]\n',
                           u'öption0 = y\n',
                          ],
                          dump)
        config2 = self._read()
        self.assertEquals('x', config2.get(u'aä', u'öption0'))
        self.assertEquals(u"Voilà l'été", config2.get(u'aä', 'option1'))
        self.assertEquals(u"Voilà l'été", config2.get(u'aä', 'option2'))
        # self.assertEquals(u"Voilà l'été", config2.get('a', 'option3'))
    def test_set_and_save_inherit(self):
        # Values from the inherited trac-site.ini must be readable while
        # locally-set options are persisted to the database
        def testcb():
            config = self._read()
            config.set('a', 'option2', "Voilà l'été")  # UTF-8
            config.set('a', 'option1', u"Voilà l'été")  # unicode
            self.assertEquals('x', config.get('a', 'option'))
            self.assertEquals(u"Voilà l'été", config.get('a', 'option1'))
            self.assertEquals(u"Voilà l'été", config.get('a', 'option2'))
            config.save()
            dump = self._dump_settings(config)
            self.assertEquals([
                               u'[a]\n',
                               u"option1 = Voilà l'été\n",
                               u"option2 = Voilà l'été\n",
                               u'[inherit]\n',
                               u"file = trac-site.ini\n",
                              ],
                              dump)
            config2 = self._read()
            self.assertEquals('x', config2.get('a', 'option'))
            self.assertEquals(u"Voilà l'été", config2.get('a', 'option1'))
            self.assertEquals(u"Voilà l'été", config2.get('a', 'option2'))
        self._test_with_inherit(testcb)
    def test_overwrite(self):
        # Re-setting an option replaces the earlier value
        config = self._read()
        config.set('a', 'option', 'value1')
        self.assertEquals('value1', config.get('a', 'option'))
        config.set('a', 'option', 'value2')
        self.assertEquals('value2', config.get('a', 'option'))
class ProductConfigSyncTestCase(MultiproductConfigTestCase):
    """Test cases for concurrent access of product configuration objects.

    Two `Configuration` objects for the same product each keep their own
    cache; this verifies when edits made through one become visible in the
    other.
    """
    def test_sync(self):
        """Config cache consistency on concurrent edits
        """
        config1 = self._read()
        config2 = self._read()
        # Initial values will be empty
        # This will initialize both instances' cache
        self.assertEqual('', config1.get('s', 'o'))
        self.assertEqual('', config2.get('s', 'o'))
        # First time assignment, no actual cache
        config1.set('s', 'o', 'value0')
        self.assertEqual('value0', config1.get('s', 'o'))
        self.assertEqual('value0', config2.get('s', 'o'))
        # Subsequent hits retrieved from cache
        config1.set('s', 'o', 'value1')
        self.assertEqual('value0', config2.get('s', 'o'))
        # ... unless cache invalidated e.g. by calling save()
        config1.save()
        self.assertTrue(config2.parse_if_needed())
        self.assertEqual('value1', config1.get('s', 'o'))
        self.assertEqual('value1', config2.get('s', 'o'))
        # TODO: Replace with trac.util.compat:wait_for_file_mtime_change when
        # changes from Trac 1.0-stable (> r12258) or Trac 1.0.2 are integrated
        # Two edits may look simultaneous depending on FS accuracy,
        # so wait 1 second to ensure next timestamp below will be different
        # otherwise the test is fragile and results non-deterministic.
        # This holds for Trac config objects too.
        time.sleep(1)
        # After update no subsequent modifications reported
        config2.set('s', 'o', 'value2')
        self.assertFalse(config1.parse_if_needed())
        self.assertEqual('value1', config1.get('s', 'o'))
        # ... unless cache invalidated e.g. by calling touch()
        config2.touch()
        self.assertTrue(config1.parse_if_needed())
        self.assertEqual('value2', config1.get('s', 'o'))
        self.assertEqual('value2', config2.get('s', 'o'))
        self.assertTrue(config2.parse_if_needed())
def test_suite():
    """Gather the configuration test cases defined in this module."""
    return unittest.TestSuite([
        unittest.makeSuite(ProductConfigTestCase, 'test'),
        unittest.makeSuite(ProductConfigSyncTestCase, 'test'),
    ])

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/__init__.py | bloodhound_multiproduct/tests/__init__.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import sys
from collections import deque
from fnmatch import fnmatch
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
from pkg_resources import resource_exists, resource_filename, \
resource_isdir, resource_listdir
class TestLoader(unittest.TestLoader):
    """Test loader discovering test modules registered with `pkg_resources`
    by traversing the package hierarchy breadth-first."""
    # Name of a module-level attribute that, when present, overrides the
    # active loader for that module and its sub-packages
    testLoaderAttribute = '__testloader__'
    testMethodPrefix = 'test'
    # Python 2 comparison function used to order test methods
    sortTestMethodsUsing = cmp
    suiteClass = unittest.TestSuite
    def discover_package(self, package_or_requirement, pattern='*/test*.py',
                         ignore_subpkg_root=True, exclude=None):
        """Find and return all test modules from the specified package
        directory, recursing into subdirectories to find them. Only test files
        that match the pattern will be loaded. (Using shell style pattern
        matching.)
        All test modules must be importable from the top level of the project
        and registered with `pkg_resources` (e.g. via `setup.py develop`).
        If a target test module contains a '__testloader__' attribute then
        related object will override current loader for every individual
        module across the hierarchy.
        """
        # Breadth-first traversal: each queue entry is
        # (module name, loader to use, is-a-package flag)
        pending = deque([(package_or_requirement, self, True)])
        tests = []
        log = logging.getLogger('bh.tests')
        if len(log.handlers) == 0:
            # Configure logger instance. otherwise messages won't be displayed
            _configure_logger(log)
        while pending:
            mdlnm, loader, isdir = pending.popleft()
            try:
                mdl = self._get_module_from_name(mdlnm)
            except (ImportError, ValueError):
                # Log import error and skip packages that don't import
                log.exception('Discovered package %s but import failed',
                              mdlnm)
                continue
            # A module may nominate its own loader via __testloader__
            loader = getattr(mdl, self.testLoaderAttribute, None) or loader
            if not (isdir and ignore_subpkg_root):
                # Prefer an explicit test_suite() factory when one exists
                if mdlnm != package_or_requirement and hasattr(mdl, 'test_suite'):
                    tests.append(mdl.test_suite())
                else:
                    tests.append(loader.loadTestsFromModule(mdl))
            if isdir and resource_exists(mdlnm, '__init__.py'):
                # Enqueue sub-packages and matching .py/.pyc modules,
                # honoring the `exclude` glob for both
                for fnm in resource_listdir(mdlnm, ''):
                    fpath = resource_filename(mdlnm, fnm)
                    if resource_isdir(mdlnm, fnm) \
                            and (exclude is None
                                 or not fnmatch(fpath + '/', exclude)):
                        pending.append((mdlnm + '.' + fnm, loader, True))
                    elif any(fnm.endswith(ext) for ext in ['.py', '.pyc']) \
                            and fnmatch(fpath, pattern) \
                            and fnm != '__init__.py'\
                            and (exclude is None
                                 or not fnmatch(fpath, exclude)):
                        submdlnm = mdlnm + '.' + fnm.rsplit('.', 1)[0]
                        pending.append((submdlnm, loader, False))
        return self.suiteClass(tests)
    def _get_module_from_name(self, name):
        # Import the dotted module name and fetch it from sys.modules
        __import__(name)
        return sys.modules[name]
def _configure_logger(log):
# See logging.basicConfig
handler = logging.StreamHandler()
formatter = logging.Formatter(logging.BASIC_FORMAT, None)
handler.setFormatter(formatter)
log.addHandler(handler)
def test_suite():
    """Discover every non-functional test module under the tests package."""
    loader = TestLoader()
    return loader.discover_package('tests', pattern='*.py',
                                   exclude='*/functional/*')

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/upgrade.py | bloodhound_multiproduct/tests/upgrade.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import shutil
import tempfile
import uuid
from contextlib import contextmanager
from tests import unittest
from trac.attachment import Attachment, AttachmentAdmin
from trac.core import Component, implements
from trac.db import DatabaseManager
from trac.db.schema import Table, Column
from trac.env import IEnvironmentSetupParticipant
from trac.test import Environment
from trac.ticket import Ticket
from trac.wiki import WikiPage
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from multiproduct.model import Product
# Tables created from scratch by the multiproduct schema upgrade
BLOODHOUND_TABLES = (
    'bloodhound_product',
    'bloodhound_productconfig',
    'bloodhound_productresourcemap',
)
# Pre-existing Trac tables that gain a `product` column during the upgrade
TABLES_WITH_PRODUCT_FIELD = (
    'ticket', 'ticket_change', 'ticket_custom', 'attachment', 'component',
    'milestone', 'wiki', 'report',
    'version', 'enum', 'permission', 'system',
)
class EnvironmentUpgradeTestCase(unittest.TestCase):
    def setUp(self, options=()):
        # Fresh Trac environment in a scratch folder for every test;
        # `options` lets subclasses seed trac.ini entries.
        env_path = tempfile.mkdtemp(prefix='bh-product-tempenv-')
        self.env = Environment(env_path, create=True, options=options)
        # DummyPlugin is presumably defined later in this module -- reset
        # its schema version so upgrade participants start from a known
        # state; TODO confirm.
        DummyPlugin.version = 1
    def tearDown(self):
        # Remove the scratch environment created in setUp()
        shutil.rmtree(self.env.path)
    def test_can_upgrade_environment_with_multi_product_disabled(self):
        # Upgrading without the multiproduct component enabled must succeed
        self.env.upgrade()
        # Multiproduct was not enabled so multiproduct tables should not exist
        # (assertFailsWithMissing* context managers are presumably defined
        # later in this class -- TODO confirm)
        for table in BLOODHOUND_TABLES:
            with self.assertFailsWithMissingTable():
                self.env.db_direct_query("SELECT * FROM %s" % table)
        # ... and no `product` column should have been added either
        for table in TABLES_WITH_PRODUCT_FIELD:
            with self.assertFailsWithMissingColumn():
                self.env.db_direct_query("SELECT product FROM %s" % table)
    def test_upgrade_creates_multi_product_tables_and_adds_product_column(self):
        self._enable_multiproduct()
        self.env.upgrade()
        # These queries raise if tables or the product column are missing
        with self.env.db_direct_transaction as db:
            for table in BLOODHOUND_TABLES:
                db("SELECT * FROM %s" % table)
            for table in TABLES_WITH_PRODUCT_FIELD:
                db("SELECT product FROM %s" % table)
    def test_upgrade_creates_default_product(self):
        self._enable_multiproduct()
        self.env.upgrade()
        # Exactly one product -- the default one -- should exist afterwards
        products = Product.select(self.env)
        self.assertEqual(len(products), 1)
    def test_upgrade_moves_tickets_and_related_objects_to_default_prod(self):
        self._add_custom_field('custom_field')
        # Seed a ticket plus its attachment, custom field value and a
        # change-log entry before enabling multiproduct
        with self.env.db_direct_transaction as db:
            db("""INSERT INTO ticket (id) VALUES (1)""")
            db("""INSERT INTO attachment (type, id, filename)
                  VALUES ('ticket', '1', '')""")
            db("""INSERT INTO ticket_custom (ticket, name, value)
                  VALUES (1, 'custom_field', '42')""")
            db("""INSERT INTO ticket_change (ticket, time, field)
                  VALUES (1, 42, 'summary')""")
        self._enable_multiproduct()
        self.env.upgrade()
        # All related rows must be reachable from the default product '@'
        with self.product('@'):
            ticket = Ticket(self.env, 1)
            attachments = list(Attachment.select(self.env,
                                                 ticket.resource.realm,
                                                 ticket.resource.id))
            self.assertEqual(len(attachments), 1)
            self.assertEqual(ticket['custom_field'], '42')
            changes = ticket.get_changelog()
            self.assertEqual(len(changes), 3)
    def test_upgrade_moves_custom_wikis_to_default_product(self):
        # A user-created wiki page and its attachment ...
        with self.env.db_direct_transaction as db:
            db("""INSERT INTO wiki (name, version) VALUES ('MyPage', 1)""")
            db("""INSERT INTO attachment (type, id, filename)
                  VALUES ('wiki', 'MyPage', '')""")
        self._enable_multiproduct()
        self.env.upgrade()
        # ... must end up in the default product '@'
        with self.env.db_direct_transaction as db:
            self.assertEqual(
                len(db("""SELECT * FROM wiki WHERE product='@'""")), 1)
            self.assertEqual(
                len(db("""SELECT * FROM attachment
                          WHERE product='@'
                          AND type='wiki'""")), 1)
    def test_upgrade_moves_system_wikis_to_products(self):
        # System pages such as WikiStart are moved to the default product
        # as well, leaving nothing behind in the global ('') scope
        with self.env.db_direct_transaction as db:
            db("""INSERT INTO wiki (name, version) VALUES ('WikiStart', 1)""")
            db("""INSERT INTO attachment (type, id, filename)
                  VALUES ('wiki', 'WikiStart', '')""")
        self._enable_multiproduct()
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            self.assertEqual(
                len(db("""SELECT * FROM wiki WHERE product='@'""")), 1)
            self.assertEqual(
                len(db("""SELECT * FROM attachment
                          WHERE product='@'
                          AND type='wiki'""")), 1)
            self.assertEqual(
                len(db("""SELECT * FROM wiki WHERE product=''""")), 0)
            self.assertEqual(
                len(db("""SELECT * FROM attachment
                          WHERE product=''
                          AND type='wiki'""")), 0)
    def test_upgrade_copies_content_of_system_tables_to_all_products(self):
        # Simulate a database already at multiproduct schema version 1 with
        # five extra products and a single row in each system table
        mp = MultiProductSystem(self.env)
        with self.env.db_direct_transaction as db:
            mp._add_column_product_to_ticket(db)
            mp._create_multiproduct_tables(db)
            mp._update_db_version(db, 1)
            for i in range(1, 6):
                db("""INSERT INTO bloodhound_product (prefix, name)
                      VALUES ('p%d', 'Product 1')""" % i)
            for table in ('component', 'milestone', 'enum', 'version',
                          'permission', 'report'):
                db("""DELETE FROM %s""" % table)
            db("""INSERT INTO component (name) VALUES ('foobar')""")
            db("""INSERT INTO milestone (name) VALUES ('foobar')""")
            db("""INSERT INTO version (name) VALUES ('foobar')""")
            db("""INSERT INTO enum (type, name) VALUES ('a', 'b')""")
            db("""INSERT INTO permission VALUES ('x', 'TICKET_VIEW')""")
            db("""INSERT INTO report (title) VALUES ('x')""")
        self._enable_multiproduct()
        self.env.upgrade()
        # Each of the 5 products plus the default product gets a copy (6)
        with self.env.db_direct_transaction as db:
            for table in ('component', 'milestone', 'version', 'enum',
                          'report'):
                rows = db("SELECT * FROM %s" % table)
                self.assertEqual(
                    len(rows), 6,
                    "Wrong number of lines in %s (%d instead of %d)\n%s"
                    % (table, len(rows), 6, rows))
            for table in ('permission',):
                # Permissions also hold rows for global product.
                rows = db("SELECT * FROM %s WHERE username='x'" % table)
                self.assertEqual(
                    len(rows), 7,
                    "Wrong number of lines in %s (%d instead of %d)\n%s"
                    % (table, len(rows), 7, rows))
    def test_upgrading_database_moves_attachment_to_correct_product(self):
        """Ticket and wiki attachments created before the upgrade must end
        up attached to the same resources in the default product ('@'),
        with their file content intact.
        """
        ticket = self.insert_ticket('ticket')
        wiki = self.insert_wiki('MyWiki')
        attachment = self._create_file_with_content('Hello World!')
        self.add_attachment(ticket.resource, attachment)
        self.add_attachment(wiki.resource, attachment)
        self._enable_multiproduct()
        self.env.upgrade()
        with self.product('@'):
            attachments = list(
                Attachment.select(self.env, 'ticket', ticket.id))
            attachments.extend(
                Attachment.select(self.env, 'wiki', wiki.name))
            self.assertEqual(len(attachments), 2)
            for attachment in attachments:
                # Content must survive the move, not just the DB rows.
                self.assertEqual(attachment.open().read(), 'Hello World!')
    def test_can_upgrade_database_with_ticket_attachment_with_text_ids(self):
        """Upgrade must not fail on ticket attachments whose id is not a
        number (ticket ids are normally integers).
        """
        with self.env.db_direct_transaction as db:
            db("""INSERT INTO attachment (id, type, filename)
                  VALUES ('abc', 'ticket', '')""")
        self._enable_multiproduct()
        # Passes if no exception is raised.
        self.env.upgrade()
    def test_can_upgrade_database_with_orphaned_attachments(self):
        """Upgrade must tolerate attachment rows whose parent ticket/wiki
        page does not exist.
        """
        with self.env.db_direct_transaction as db:
            db("""INSERT INTO attachment (id, type, filename)
                  VALUES ('5', 'ticket', '')""")
            db("""INSERT INTO attachment (id, type, filename)
                  VALUES ('MyWiki', 'wiki', '')""")
        self._enable_multiproduct()
        # Passes if no exception is raised.
        self.env.upgrade()
    def test_can_upgrade_multi_product_from_v1(self):
        """Upgrade from multiproduct DB v1, where ticket.product held the
        product *name*; afterwards the ticket is reachable via the prefix.
        """
        mp = MultiProductSystem(self.env)
        with self.env.db_direct_transaction as db:
            mp._add_column_product_to_ticket(db)
            mp._create_multiproduct_tables(db)
            mp._update_db_version(db, 1)
            db("""INSERT INTO bloodhound_product (prefix, name)
                  VALUES ('p1', 'Product 1')""")
            # v1 schema: product column stores the name, not the prefix.
            db("""INSERT INTO ticket (id, product)
                  VALUES (1, 'Product 1')""")
        self._enable_multiproduct()
        self.env.upgrade()
        with self.product('p1'):
            # Raises ResourceNotFound (failing the test) if migration broke.
            Ticket(self.env, 1)
    def test_can_upgrade_multi_product_from_v2(self):
        """Upgrade from multiproduct DB v2 (product column already stores
        the prefix); tickets without a product move to the default
        product '@'.
        """
        mp = MultiProductSystem(self.env)
        with self.env.db_direct_transaction as db:
            mp._add_column_product_to_ticket(db)
            mp._create_multiproduct_tables(db)
            mp._replace_product_on_ticket_with_product_prefix(db)
            mp._update_db_version(db, 2)
            db("""INSERT INTO bloodhound_product (prefix, name)
                  VALUES ('p1', 'Product 1')""")
            db("""INSERT INTO ticket (id, product)
                  VALUES (1, 'p1')""")
            # Ticket with no product assigned at all.
            db("""INSERT INTO ticket (id)
                  VALUES (2)""")
        self._enable_multiproduct()
        self.env.upgrade()
        with self.product('p1'):
            Ticket(self.env, 1)
        with self.product('@'):
            Ticket(self.env, 2)
    def test_upgrade_plugin(self):
        """A plugin's own schema upgrade (v1 -> v2 of DummyPlugin) adds the
        new column only after the second env.upgrade() run.
        """
        self._enable_component(DummyPlugin)
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            db("SELECT v1 FROM dummy_table")
            # v2 column must not exist yet at plugin version 1.
            with self.assertFailsWithMissingColumn():
                db("SELECT v2 FROM dummy_table")
        DummyPlugin.version = 2
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            db("SELECT v2 FROM dummy_table")
    def test_upgrade_plugin_to_multiproduct(self):
        """Enabling a plugin together with multiproduct creates its table
        both globally and for the default product ('@'-prefixed).
        """
        self._enable_multiproduct()
        self._enable_component(DummyPlugin)
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            db("SELECT * FROM dummy_table")
            db("""SELECT * FROM "@_dummy_table" """)
    def test_upgrade_existing_plugin_to_multiproduct(self):
        """A plugin installed before multiproduct gets a per-product copy of
        its table once multiproduct is enabled and the env upgraded again.
        """
        self._enable_component(DummyPlugin)
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            # No product-scoped table before the multiproduct upgrade.
            with self.assertFailsWithMissingTable():
                db("""SELECT * FROM "@_dummy_table" """)
        self._enable_multiproduct()
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            db("SELECT * FROM dummy_table")
            db("""SELECT * FROM "@_dummy_table" """)
    def test_upgrading_existing_plugin_leaves_data_in_global_env(self):
        """Pre-existing plugin data stays in the global table; the new
        product-scoped table starts out empty.
        """
        DummyPlugin.version = 2
        self._enable_component(DummyPlugin)
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            for i in range(5):
                db("INSERT INTO dummy_table (v1) VALUES ('%d')" % i)
            self.assertEqual(
                len(db("SELECT * FROM dummy_table")), 5)
        self._enable_multiproduct()
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            self.assertEqual(
                len(db('SELECT * FROM "dummy_table"')), 5)
            self.assertEqual(
                len(db('SELECT * FROM "@_dummy_table"')), 0)
    def test_creating_new_product_calls_environment_created(self):
        """Creating a product environment triggers the plugin's
        environment_created hook, which creates its product-scoped table.
        """
        self._enable_component(DummyPlugin)
        self._enable_multiproduct()
        self.env.upgrade()
        prod = Product(self.env)
        prod.update_field_dict(dict(prefix='p1'))
        ProductEnvironment(self.env, prod, create=True)
        with self.env.db_direct_transaction as db:
            # Fails with "no such table" if the hook did not run.
            db('SELECT * FROM "p1_dummy_table"')
    def test_migrating_to_multiproduct_with_custom_default_prefix(self):
        """The configured default_product_prefix is honoured during the
        migration instead of the built-in '@'.
        """
        # Ticket only exists so the migration has data to move.
        ticket = self.insert_ticket('ticket')
        self.env.config.set('multiproduct', 'default_product_prefix', 'xxx')
        self._enable_multiproduct()
        self.env.upgrade()
        products = Product.select(self.env)
        self.assertEqual(len(products), 1)
        self.assertEqual(products[0].prefix, 'xxx')
    def test_migration_to_multiproduct_preserves_ticket_ids(self):
        """Tickets with non-contiguous ids keep their ids after moving to
        the default product.
        """
        for ticket_id in (1, 3, 5, 7):
            with self.env.db_transaction as db:
                cursor = db.cursor()
                cursor.execute("INSERT INTO ticket (id) VALUES (%i)" % ticket_id)
                # Keep the id sequence in sync with the explicit inserts.
                db.update_sequence(cursor, 'ticket')
        self._enable_multiproduct()
        self.env.upgrade()
        for ticket_id in (1, 3, 5, 7):
            with self.product('@'):
                ticket = Ticket(self.env, ticket_id)
                self.assertEqual(ticket.id, ticket_id)
    def test_can_insert_tickets_after_upgrade(self):
        """The ticket id sequence continues after migration: a ticket
        inserted post-upgrade gets the next id, not a duplicate.
        """
        t1 = Ticket(self.env)
        t1.summary = "test"
        t1.insert()
        self.assertEqual(t1.id, 1)
        self._enable_multiproduct()
        self.env.upgrade()
        with self.product('@'):
            ticket = Ticket(self.env)
            ticket.summary = 'test'
            ticket.insert()
            self.assertEqual(ticket.id, 2)
    def test_can_insert_tickets_with_same_id_to_different_products(self):
        """Ticket ids are scoped per product: the same id may exist in two
        products and resolves to different tickets.
        """
        self._enable_multiproduct()
        self.env.upgrade()
        self.env.db_transaction("INSERT INTO ticket (id, summary)"
                                " VALUES (1, 'first product')")
        t1 = Ticket(self.env, 1)
        with self.product('@'):
            self.env.db_transaction("INSERT INTO ticket (id, summary)"
                                    " VALUES (1, 'second product')")
            t2 = Ticket(self.env, 1)
        self.assertEqual(t1.id, t2.id)
        self.assertNotEqual(t1['summary'], t2['summary'])
    def test_batch_ticket_insert_after_upgrade(self):
        """INSERT ... SELECT into the ticket table works inside a product
        context (table names get translated to their product-scoped form).
        """
        self._enable_multiproduct()
        self.env.upgrade()
        with self.env.db_direct_transaction as db:
            db("""CREATE TABLE "@_tmp" (summary text, product text)""")
            for summary in "abcdef":
                db("""INSERT INTO "@_tmp" VALUES ('%s', '@')""" % (summary,))
        with self.product('@'):
            with self.env.db_transaction as db:
                # 'tmp' is rewritten to "@_tmp" by the product translator.
                db("""INSERT INTO ticket (summary) SELECT summary FROM tmp""")
    def _enable_multiproduct(self):
        """Enable all multiproduct components and reload the environment."""
        self._update_config('components', 'multiproduct.*', 'enabled')
    def _add_custom_field(self, field_name):
        """Declare a text custom ticket field and reload the environment."""
        self._update_config('ticket-custom', field_name, 'text')
def _enable_component(self, cls):
self._update_config(
'components',
'%s.%s' % (cls.__module__, cls.__name__),
'enabled'
)
    def _update_config(self, section, key, value):
        """Set a config option, persist it, and re-open the environment.

        Re-creating `self.env` forces the new configuration to take effect
        (component enabling is only evaluated at environment load time).
        """
        self.env.config.set(section, key, value)
        self.env.config.save()
        self.env = Environment(self.env.path)
def _create_file_with_content(self, content):
filename = str(uuid.uuid4())[:6]
path = os.path.join(self.env.path, filename)
with open(path, 'wb') as f:
f.write(content)
return path
    @contextmanager
    def assertFailsWithMissingTable(self):
        """Context manager asserting the body raises OperationalError with
        a "no such table" message (SQLite wording).
        """
        with self.assertRaises(self.env.db_exc.OperationalError) as cm:
            yield
        self.assertIn('no such table', str(cm.exception))
    @contextmanager
    def assertFailsWithMissingColumn(self):
        """Context manager asserting the body raises OperationalError with
        a "no such column" message (SQLite wording).
        """
        with self.assertRaises(self.env.db_exc.OperationalError) as cm:
            yield
        self.assertIn('no such column', str(cm.exception))
def create_ticket(self, summary, **kw):
ticket = Ticket(self.env)
ticket["summary"] = summary
for k, v in kw.items():
ticket[k] = v
return ticket
    def insert_ticket(self, summary, **kw):
        """Helper for inserting a ticket into the database.

        Returns the inserted `Ticket` (its id populated by insert()).
        """
        ticket = self.create_ticket(summary, **kw)
        ticket.insert()
        return ticket
    def create_wiki(self, name, text, **kw):
        """Build (without saving) a wiki page `name` with body `text`;
        extra keyword args are set as page items.
        """
        page = WikiPage(self.env, name)
        page.text = text
        for k, v in kw.items():
            page[k] = v
        return page
    def insert_wiki(self, name, text = None, **kw):
        """Create and save a wiki page `name`, returning the saved page.

        Note: a falsy `text` (None or '') is replaced with "Dummy text".
        """
        text = text or "Dummy text"
        page = self.create_wiki(name, text, **kw)
        page.save("dummy author", "dummy comment", "::1")
        return page
def add_attachment(self, resource, path):
resource = '%s:%s' % (resource.realm, resource.id)
AttachmentAdmin(self.env)._do_add(resource, path)
@contextmanager
def product(self, prefix):
old_env = self.env
self.env = ProductEnvironment(self.env, prefix)
yield
self.env = old_env
class DummyPlugin(Component):
    """Minimal IEnvironmentSetupParticipant used to exercise plugin schema
    upgrades: version `n` of `dummy_table` has columns id, v1 .. vn.
    """
    implements(IEnvironmentSetupParticipant)

    # Tests bump this to trigger environment_needs_upgrade().
    version = 1

    def environment_created(self):
        # Bring a freshly created environment straight to `version`.
        with self.env.db_transaction as db:
            self.upgrade_environment(db)

    def environment_needs_upgrade(self, db):
        return self.get_version(db) < self.version

    def upgrade_environment(self, db):
        """Recreate dummy_table one version at a time up to `self.version`,
        carrying existing rows over at each step (new column filled with '').
        """
        old_version = current_version = self.get_version(db)
        db_connector, dummy = DatabaseManager(self.env)._get_connector()
        while current_version < self.version:
            if current_version > 0:
                # Stash existing rows before dropping the table.
                db("CREATE TEMPORARY TABLE dummy_table_old AS "
                   "SELECT * FROM dummy_table")
                db("DROP TABLE dummy_table")
            table = self.construct_dummy_table(current_version+1)
            for statement in db_connector.to_sql(table):
                db(statement)
            if current_version > 0:
                # Copy old rows back; the freshly added column gets ''.
                cols = ['id'] + ['v%i' % (i+1)
                                 for i in range(current_version+1)]
                db("""INSERT INTO dummy_table (%s)
                      SELECT %s, '' FROM dummy_table_old
                   """ % (', '.join(cols), ', '.join(cols[:-1])))
                db("DROP TABLE dummy_table_old")
            current_version += 1
        if current_version != old_version:
            self.update_version(db, current_version)

    def construct_dummy_table(self, n_custom_fields=1):
        """Return the Table schema: an id column plus v1..v<n> columns."""
        fields = [Column('id')] + [
            Column('v%d' % (i+1)) for i in range(n_custom_fields)
        ]
        return Table('dummy_table')[fields]

    def get_version(self, db):
        """Read this plugin's schema version from the `system` table
        (keyed by class name); 0 when not yet installed.
        """
        rows = db("SELECT value FROM system WHERE name = %s",
                  (self.__class__.__name__,))
        return int(rows[0][0]) if rows else 0

    def update_version(self, db, version):
        """Insert or update this plugin's version row; returns `version`."""
        old_version = self.get_version(db)
        if old_version:
            db("UPDATE system SET value=%s WHERE name=%s",
               (version, self.__class__.__name__,))
        else:
            db("INSERT INTO system (name, value) VALUES ('%s','%s')"
               % (self.__class__.__name__, version))
        return version
def test_suite():
    """Aggregate the environment upgrade tests into a single suite."""
    tests = unittest.TestSuite()
    tests.addTest(unittest.makeSuite(EnvironmentUpgradeTestCase, 'test'))
    return tests
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/hooks.py | bloodhound_multiproduct/tests/hooks.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tests import unittest
from trac.web.href import Href
from multiproduct.hooks import ProductizedHref
class ProductizedHrefTestCase(unittest.TestCase):
    """Exercise ProductizedHref: global paths stay under the global base,
    product resources move under the product base URL.
    """
    def setUp(self):
        self.ghref = Href('/gbase')
        self.phref = ProductizedHref(self.ghref, '/gbase/product')

    def test_paths_no_transform(self):
        # Site-wide paths (admin, auth, prefs) are not product-scoped.
        self.assertEqual('/gbase/admin', self.phref.admin())
        self.assertEqual('/gbase/logout', self.phref.logout())
        self.assertEqual('/gbase/prefs', self.phref('prefs'))
        self.assertEqual('/gbase/verify_email?a=1&b=cde',
                         self.phref('verify_email', a=1, b='cde'))

    def test_static_path_no_transform(self):
        # Static asset roots are shared, never rewritten per product.
        self.assertEqual('/gbase/js', self.phref('js/'))
        self.assertEqual('/gbase/css', self.phref('css/'))
        self.assertEqual('/gbase/img', self.phref('img/'))

    def test_params_as_args(self):
        self.assertEqual('/gbase/product/ticket/540',
                         self.phref('ticket', 540))
        self.assertEqual('/gbase/product/ticket/540',
                         self.phref.ticket(540))

    def test_params_as_kwargs(self):
        # Query-string argument order is unspecified; accept both.
        self.assertIn(self.phref('ticket', param='value',
                                 other='other value'),
                      ['/gbase/product/ticket?param=value&other=other+value',
                       '/gbase/product/ticket?other=other+value&param=value'])

    def test_params_as_dictionary(self):
        # Query-string argument order is unspecified; accept both.
        self.assertIn(self.phref.ticket({'param': 'value',
                                         'other': 'other value'}),
                      ['/gbase/product/ticket/?param=value&other=other+value',
                       '/gbase/product/ticket?other=other+value&param=value'])
def test_suite():
    """Collect the ProductizedHref tests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductizedHrefTestCase, 'test'))
    return suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/perm.py | bloodhound_multiproduct/tests/perm.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's product permissions subsystem"""
import unittest
from trac.admin.api import AdminCommandError
from trac import perm
from trac.resource import Neighborhood
from trac.test import Mock
from trac.tests.perm import DefaultPermissionStoreTestCase,\
PermissionSystemTestCase, PermissionCacheTestCase,\
PermissionPolicyTestCase, TestPermissionPolicy, TestPermissionRequestor
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from multiproduct.model import Product
from multiproduct.perm import MultiproductPermissionPolicy, sudo
from tests.env import MultiproductTestCase
# DefaultPermission policy has its own cache that causes
# test_product_trac_admin_actions to fail sometimes.
perm.DefaultPermissionPolicy.CACHE_EXPIRY = 0
class ProductDefaultPermissionStoreTestCase(DefaultPermissionStoreTestCase,
                                            MultiproductTestCase):
    """Run the upstream DefaultPermissionStore tests against a product
    environment, plus an isolation check across environments.
    """
    def setUp(self):
        self.global_env = self._setup_test_env()
        self._upgrade_mp(self.global_env)
        self._setup_test_log(self.global_env)
        self._load_product_from_data(self.env, self.default_product)
        # Re-bind self.env to the product scope for the inherited tests.
        self.env = ProductEnvironment(self.env, self.default_product)

        self.store = perm.DefaultPermissionStore(self.env)

    def test_env_isolation(self):
        """Permissions granted in one environment (global, tp1, tp2) must
        not leak into the others.
        """
        global_env = self.global_env
        env = self.env

        self._load_product_from_data(self.global_env, 'tp2')
        env1 = ProductEnvironment(self.global_env, 'tp2')

        global_store = perm.DefaultPermissionStore(global_env)
        store = perm.DefaultPermissionStore(env)
        store1 = perm.DefaultPermissionStore(env1)

        # Same user/group names, different grants per environment.
        global_env.db_transaction.executemany(
            "INSERT INTO permission VALUES (%s,%s)",
            [('dev', 'WIKI_MODIFY'),
             ('dev', 'REPORT_ADMIN'),
             ('john', 'dev')])
        env.db_transaction.executemany(
            "INSERT INTO permission VALUES (%s,%s)",
            [('dev', 'WIKI_VIEW'),
             ('dev', 'REPORT_VIEW'),
             ('john', 'dev')])
        env1.db_transaction.executemany(
            "INSERT INTO permission VALUES (%s,%s)",
            [('dev', 'TICKET_CREATE'),
             ('dev', 'MILESTONE_VIEW'),
             ('john', 'dev')])
        self.assertEquals(['REPORT_ADMIN', 'WIKI_MODIFY'],
                          sorted(global_store.get_user_permissions('john')))
        self.assertEquals(['REPORT_VIEW', 'WIKI_VIEW'],
                          sorted(store.get_user_permissions('john')))
        self.assertEquals(['MILESTONE_VIEW', 'TICKET_CREATE'],
                          sorted(store1.get_user_permissions('john')))
class ProductPermissionSystemTestCase(PermissionSystemTestCase,
                                      MultiproductTestCase):
    """Run the upstream PermissionSystem tests against a product env."""
    @property
    def env(self):
        # Lazily build the global env and the default product env on first
        # access; cached in self._env afterwards.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env(enable=[
                    perm.PermissionSystem,
                    perm.DefaultPermissionStore,
                    TestPermissionRequestor])
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                    self.global_env, self.default_product)
        return env

    @env.setter
    def env(self, value):
        # Deliberate no-op: the base class setUp assigns self.env, but the
        # environment here is always derived lazily by the getter.
        pass

    def test_all_permissions(self):
        # PRODUCT_ADMIN meta-permission in product context
        self.assertEqual({'EMAIL_VIEW': True, 'TRAC_ADMIN': True,
                          'TEST_CREATE': True, 'TEST_DELETE': True,
                          'TEST_MODIFY': True,  'TEST_ADMIN': True,
                          'PRODUCT_ADMIN' : True},
                         self.perm.get_user_permissions())

    def test_expand_actions_iter_7467(self):
        # Check that expand_actions works with iterators (#7467)
        # PRODUCT_ADMIN meta-permission in product context
        perms = set(['EMAIL_VIEW', 'TRAC_ADMIN', 'TEST_DELETE', 'TEST_MODIFY',
                     'TEST_CREATE', 'TEST_ADMIN', 'PRODUCT_ADMIN'])
        self.assertEqual(perms, self.perm.expand_actions(['TRAC_ADMIN']))
        self.assertEqual(perms, self.perm.expand_actions(iter(['TRAC_ADMIN'])))
class ProductPermissionCacheTestCase(PermissionCacheTestCase,
                                     MultiproductTestCase):
    """Run the upstream PermissionCache tests against a product env."""
    @property
    def env(self):
        # Lazily build the global env and the default product env; cached.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env(enable=[
                    perm.DefaultPermissionStore,
                    perm.DefaultPermissionPolicy,
                    TestPermissionRequestor])
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                    self.global_env, self.default_product)
        return env

    @env.setter
    def env(self, value):
        # Deliberate no-op: env is always derived lazily by the getter.
        pass
class ProductNeighborhoodPermissionCacheTestCase(ProductPermissionCacheTestCase,
                                                 MultiproductTestCase):
    """Same as ProductPermissionCacheTestCase, but checks permissions via a
    product neighborhood resource resolved from the *global* environment.
    """
    @property
    def env(self):
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env(enable=[
                    perm.DefaultPermissionStore,
                    perm.DefaultPermissionPolicy,
                    MultiProductSystem,
                    TestPermissionRequestor])
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                    self.global_env, self.default_product)
        return env

    @env.setter
    def env(self, value):
        # Deliberate no-op: env is always derived lazily by the getter.
        pass

    def setUp(self):
        ProductPermissionCacheTestCase.setUp(self)
        # Replace the cache with one anchored at the product neighborhood
        # so lookups go through the global env + neighborhood resolution.
        nbh = Neighborhood('product', self.default_product)
        resource = nbh.child(None, None)
        self.perm = perm.PermissionCache(self.global_env, 'testuser', resource)
class SudoTestCase(ProductPermissionCacheTestCase):
    """Tests for the `sudo` permission context.

    Besides the explicit tests below, every test of
    ProductPermissionCacheTestCase is re-generated three times at class
    definition time (see `_test_with_sudo_rules` and `_gen_tests`), each
    wrapping `self.perm` in a sudo context with a different grant set.
    """
    # Names of the inherited permission-cache tests to re-generate.
    loader = unittest.defaultTestLoader
    tcnames = loader.getTestCaseNames(ProductPermissionCacheTestCase)
    # Generated test functions, attached to the class after its definition
    # (see the module-level setattr loop below this class).
    _gen_tests = {}

    def test_sudo_wrong_context(self):
        # A sudo context not bound to a permission cache cannot be used.
        sudoperm = sudo(None, 'EMAIL_VIEW', ['TEST_ADMIN'])
        with self.assertRaises(RuntimeError) as test_cm:
            sudoperm.has_permission('TEST_MODIFY')
        self.assertEqual('Permission check out of context',
                         str(test_cm.exception))
        with self.assertRaises(ValueError) as test_cm:
            with sudoperm:
                pass
        self.assertEquals('Context manager not bound to request object',
                          str(test_cm.exception))

    def test_sudo_fail_require(self):
        # require() failures report the sudo'ed action, not the one asked.
        sudoperm = sudo(None, 'EMAIL_VIEW', ['TEST_ADMIN'])
        sudoperm.perm = self.perm
        with self.assertRaises(perm.PermissionError) as test_cm:
            sudoperm.require('TRAC_ADMIN')
        self.assertEqual('EMAIL_VIEW', test_cm.exception.action)

    def test_sudo_grant_meta_perm(self):
        self.env.parent.enable_component(perm.PermissionSystem)
        self.env.enable_component(perm.PermissionSystem)
        del self.env.parent.enabled[perm.PermissionSystem]
        del self.env.enabled[perm.PermissionSystem]

        # Granting TRAC_ADMIN expands to the actions it implies.
        sudoperm = sudo(None, 'TEST_CREATE', ['TRAC_ADMIN'])
        sudoperm.perm = self.perm
        self.assertTrue(sudoperm.has_permission('EMAIL_VIEW'))

    def test_sudo_ambiguous(self):
        # Same action in both grant and revoke must be rejected, whether
        # detected at construction or when binding the permission cache.
        with self.assertRaises(ValueError) as test_cm:
            sudo(None, 'TEST_MODIFY', ['TEST_MODIFY', 'TEST_DELETE'],
                 ['TEST_MODIFY', 'TEST_CREATE'])
        self.assertEquals('Impossible to grant and revoke (TEST_MODIFY)',
                          str(test_cm.exception))

        with self.assertRaises(ValueError) as test_cm:
            sudoperm = sudo(None, 'TEST_MODIFY', ['TEST_ADMIN'],
                            ['TEST_MODIFY', 'TEST_CREATE'])
            sudoperm.perm = self.perm
        self.assertEquals('Impossible to grant and revoke '
                          '(TEST_CREATE, TEST_MODIFY)',
                          str(test_cm.exception))

        with self.assertRaises(ValueError) as test_cm:
            req = Mock(perm=self.perm)
            sudo(req, 'TEST_MODIFY', ['TEST_ADMIN'],
                 ['TEST_MODIFY', 'TEST_CREATE'])
        self.assertEquals('Impossible to grant and revoke '
                          '(TEST_CREATE, TEST_MODIFY)',
                          str(test_cm.exception))

    # Sudo permission context equivalent to permissions cache
    # if there's no action to require, allow or deny.
    def _test_with_sudo_rules(tcnm, prefix, grant):
        # Factory: wrap inherited test `tcnm` so it runs with self.perm
        # replaced by a sudo context granting `grant` (revoked from the
        # store first, so only the sudo grant can supply them).
        target = getattr(ProductPermissionCacheTestCase, tcnm)
        def _sudo_eq_checker(self):
            for action in grant:
                self.perm_system.revoke_permission('testuser', action)
            realperm = self.perm
            self.perm = sudo(None, [], grant, [])
            self.perm.perm = realperm
            target(self)

        _sudo_eq_checker.func_name = prefix + tcnm
        return _sudo_eq_checker

    # Generate three variants per inherited test: no extra grants,
    # a partial grant, and a full grant.
    for tcnm in tcnames:
        f1 = _test_with_sudo_rules(tcnm, '', [])
        f2 = _test_with_sudo_rules(tcnm, 'test_sudo_partial_',
                                   ['TEST_MODIFY'])
        f3 = _test_with_sudo_rules(tcnm, 'test_sudo_full_',
                                   ['TEST_MODIFY', 'TEST_ADMIN'])
        for f in (f1, f2, f3):
            _gen_tests[f.func_name] = f

    # Keep the class namespace clean of loop temporaries.
    del loader, tcnames, tcnm, f1, f2, f3
# Attach the generated sudo-equivalence tests to SudoTestCase; this must
# happen after the class body so unittest discovers them as methods.
list(setattr(SudoTestCase, tcnm, f)
     for tcnm, f in SudoTestCase._gen_tests.iteritems())
class ProductPermissionPolicyTestCase(PermissionPolicyTestCase,
                                      MultiproductTestCase):
    """Permission policy behavior in product scope: policy chaining,
    TRAC_ADMIN handling, and PRODUCT_ADMIN for product owners.
    """
    @property
    def env(self):
        # Lazily build the global env and the default product env; cached.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env(enable=[
                    perm.DefaultPermissionStore,
                    perm.DefaultPermissionPolicy,
                    perm.PermissionSystem,
                    TestPermissionPolicy,
                    TestPermissionRequestor,
                    MultiproductPermissionPolicy])
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                    self.global_env, self.default_product)
        return env

    @env.setter
    def env(self, value):
        # Deliberate no-op: env is always derived lazily by the getter.
        pass

    def setUp(self):
        super(ProductPermissionPolicyTestCase, self).setUp()

        self.global_env.config.set('trac', 'permission_policies',
                                   'DefaultPermissionPolicy')
        self.permsys = perm.PermissionSystem(self.env)
        self.global_perm_admin = perm.PermissionAdmin(self.global_env)
        self.product_perm_admin = perm.PermissionAdmin(self.env)

    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self.env = None

    def test_prepend_mp_policy(self):
        """MultiproductPermissionPolicy is always first in the chain."""
        self.assertEqual([MultiproductPermissionPolicy(self.env), self.policy],
                         self.permsys.policies)

    def test_policy_chaining(self):
        """A decision by an earlier policy short-circuits the later ones."""
        self.env.config.set('trac', 'permission_policies',
                            'TestPermissionPolicy,DefaultPermissionPolicy')
        self.policy.grant('testuser', ['TEST_MODIFY'])
        system = perm.PermissionSystem(self.env)
        system.grant_permission('testuser', 'TEST_ADMIN')

        self.assertEqual(list(system.policies),
                         [MultiproductPermissionPolicy(self.env),
                          self.policy,
                          perm.DefaultPermissionPolicy(self.env)])
        self.assertEqual('TEST_MODIFY' in self.perm, True)
        self.assertEqual('TEST_ADMIN' in self.perm, True)
        # None means "no opinion": TEST_ADMIN fell through to the default.
        self.assertEqual(self.policy.results,
                         {('testuser', 'TEST_MODIFY'): True,
                          ('testuser', 'TEST_ADMIN'): None})

    def test_product_trac_admin_success(self):
        """TRAC_ADMIN in global env also valid in product env
        """
        self.global_perm_admin._do_add('testuser', 'TRAC_ADMIN')
        self.assertTrue(self.perm.has_permission('TRAC_ADMIN'))

    def test_product_trac_admin_actions(self):
        """Allow all actions in product scope for TRAC_ADMIN
        """
        self.global_perm_admin._do_add('testuser', 'TRAC_ADMIN')
        all_actions = self.permsys.get_actions()
        self.assertEquals(['TEST_CREATE', 'EMAIL_VIEW', 'TRAC_ADMIN',
                           'TEST_DELETE', 'TEST_MODIFY', 'PRODUCT_ADMIN',
                           'TEST_ADMIN'], all_actions)
        self.assertEquals({}, self.permsys.get_user_permissions('testuser'))
        for action in all_actions:
            self.assertTrue(self.perm.has_permission(action),
                            'Check for permission action %s' % (action,))
        self.assertFalse(self.perm.has_permission('UNKNOWN_PERM'))

        # Clear permissions cache and retry
        self.perm._cache.clear()
        self.global_perm_admin._do_remove('testuser', 'TRAC_ADMIN')

        all_actions = self.permsys.get_actions()
        self.assertEquals(['TEST_CREATE', 'EMAIL_VIEW', 'TRAC_ADMIN',
                           'TEST_DELETE', 'TEST_MODIFY', 'PRODUCT_ADMIN',
                           'TEST_ADMIN'], all_actions)
        self.assertEquals({}, self.permsys.get_user_permissions('testuser'))
        for action in all_actions:
            self.assertFalse(self.perm.has_permission(action),
                             'Check for permission action %s' % (action,))

    def test_product_trac_admin_fail_local(self):
        """TRAC_ADMIN granted in product env will be ignored
        """
        try:
            # Not needed but added just in case , also for readability
            self.global_perm_admin._do_remove('testuser', 'TRAC_ADMIN')
        except AdminCommandError:
            pass
        # Setting TRAC_ADMIN permission in product scope is in vain
        # since it controls access to critical actions affecting the whole site
        # This will protect the system against malicious actors
        # and / or failures leading to the addition of TRAC_ADMIN permission
        # in product perm store in spite of obtaining unrighteous super powers.
        # On the other hand this also means that PRODUCT_ADMIN(s) are
        # able to set user permissions at will without jeopardizing system
        # integrity and stability.
        self.product_perm_admin._do_add('testuser', 'TRAC_ADMIN')
        self.assertFalse(self.perm.has_permission('TRAC_ADMIN'))

    def test_product_owner_perm(self):
        """Product owner automatically granted with PRODUCT_ADMIN
        """
        self.assertIs(self.env.product.owner, None)
        self.assertFalse(self.perm.has_permission('PRODUCT_ADMIN'))
        self.env.product.owner = 'testuser'
        # FIXME: update really needed ?
        self.env.product.update()
        try:
            # Not needed but added just in case , also for readability
            self.global_perm_admin._do_remove('testuser', 'TRAC_ADMIN')
        except AdminCommandError:
            pass
        self.perm._cache.clear()

        self.assertTrue(self.perm.has_permission('PRODUCT_ADMIN'))
        self.assertFalse(self.perm.has_permission('TRAC_ADMIN'))

    def test_new_product_perm(self):
        """Only product owner and TRAC_ADMIN will access new product
        """
        newproduct = Product(self.global_env)
        newproduct.prefix = 'NEW'
        newproduct.name = 'New product'
        newproduct.owner = 'owneruser'
        newproduct.insert()

        env = ProductEnvironment(self.global_env, newproduct)
        self.global_perm_admin._do_add('adminuser', 'TRAC_ADMIN')
        admin_perm = perm.PermissionCache(env, 'adminuser')
        owner_perm = perm.PermissionCache(env, 'owneruser')
        user_perm = perm.PermissionCache(env, 'testuser')
        global_permsys = perm.PermissionSystem(self.global_env)
        permsys = perm.PermissionSystem(env)

        # Neither owner nor admin hold any *stored* permission in the new
        # product; their access comes from TRAC_ADMIN / ownership.
        self.assertEquals({'EMAIL_VIEW': True, 'TEST_ADMIN': True,
                           'TEST_CREATE': True, 'TEST_DELETE': True,
                           'TEST_MODIFY': True, 'TRAC_ADMIN' : True},
                          global_permsys.get_user_permissions('adminuser'))
        self.assertEquals({}, global_permsys.get_user_permissions('owneruser'))
        self.assertEquals({}, global_permsys.get_user_permissions('testuser'))
        self.assertEquals({}, permsys.get_user_permissions('adminuser'))
        self.assertEquals({}, permsys.get_user_permissions('owneruser'))
        self.assertEquals({}, permsys.get_user_permissions('testuser'))

        all_actions = self.permsys.get_actions()
        all_actions.remove('TRAC_ADMIN')
        for action in all_actions:
            self.assertTrue(admin_perm.has_permission(action))
            self.assertTrue(owner_perm.has_permission(action))
            self.assertFalse(user_perm.has_permission(action))
        self.assertTrue(admin_perm.has_permission('TRAC_ADMIN'))
        self.assertFalse(owner_perm.has_permission('TRAC_ADMIN'))
        self.assertFalse(user_perm.has_permission('TRAC_ADMIN'))
def test_suite():
    """Collect every product permission test case into one suite."""
    suite = unittest.TestSuite()
    for case in (ProductDefaultPermissionStoreTestCase,
                 ProductPermissionSystemTestCase,
                 ProductPermissionCacheTestCase,
                 ProductNeighborhoodPermissionCacheTestCase,
                 ProductPermissionPolicyTestCase,
                 SudoTestCase):
        suite.addTest(unittest.makeSuite(case, 'test'))
    return suite
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/env.py | bloodhound_multiproduct/tests/env.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's product environments"""
import os.path
import shutil
import sys
import tempfile
from inspect import stack
from tests import unittest
from types import MethodType
from trac.admin.api import AdminCommandManager, IAdminCommandProvider
from trac.config import Option
from trac.core import Component, ComponentMeta, implements
from trac.env import Environment
from trac.test import EnvironmentStub, MockPerm
from trac.tests.env import EnvironmentTestCase
from trac.ticket.report import ReportModule
from trac.ticket.web_ui import TicketModule
from trac.util.text import to_unicode
from trac.web.href import Href
from multiproduct.api import DB_VERSION, MultiProductSystem
from multiproduct.env import ProductEnvironment
from multiproduct.model import Product
_AssertRaisesContext = unittest.case._AssertRaisesContext
class ProductEnvironmentStub(ProductEnvironment):
    r"""A product environment slightly tweaked for testing purposes
    """
    def get_known_users(self, cnx=None):
        # Tests inject the user list directly via the `known_users`
        # attribute instead of querying the database.
        return self.known_users
# FIXME: Subclass TestCase explicitly?
class MultiproductTestCase(unittest.TestCase):
r"""Mixin providing access to multi-product testing extensions.
This class serves to the purpose of upgrading existing Trac test cases
with multi-product super-powers while still providing the foundations
to create product-specific subclasses.
"""
# unittest2 extensions
exceptFailureMessage = None
    class _AssertRaisesLoggingContext(_AssertRaisesContext):
        """Add logging capabilities to assertRaises

        On failure, `exceptFailureMessage` (if set on the test case) is
        interpolated with the local variables of the failing frame and
        appended to the standard failure message.
        """
        def __init__(self, expected, test_case, expected_regexp=None):
            _AssertRaisesContext.__init__(
                    self, expected, test_case, expected_regexp)
            self.test_case = test_case

        @staticmethod
        def _tb_locals(tb):
            # Capture locals either from the traceback frame or, when no
            # traceback is available, from the caller two frames up.
            if tb is None:
                # Inspect interpreter stack two levels up
                ns = stack()[2][0].f_locals.copy()
            else:
                # Traceback already in context
                ns = tb.tb_frame.f_locals.copy()
            ns.pop('__builtins__', None)
            return ns

        def __exit__(self, exc_type, exc_value, tb):
            try:
                return _AssertRaisesContext.__exit__(self, exc_type,
                                                     exc_value, tb)
            except self.failureException, exc:
                msg = self.test_case.exceptFailureMessage
                if msg is not None:
                    standardMsg = str(exc)
                    # Interpolate frame locals into the custom message.
                    msg = msg % self._tb_locals(tb)
                    msg = self.test_case._formatMessage(msg, standardMsg)
                    raise self.failureException(msg)
                else:
                    raise
            finally:
                # Clear message placeholder
                self.test_case.exceptFailureMessage = None
    def assertRaises(self, excClass, callableObj=None, *args, **kwargs):
        """Adds logging capabilities on top of unittest2 implementation.

        When used as a context manager (no callable given), returns the
        logging-aware context; otherwise defers to the stock assertRaises.
        """
        if callableObj is None:
            return self._AssertRaisesLoggingContext(excClass, self)
        else:
            return unittest.TestCase.assertRaises(
                    self, excClass, callableObj, *args, **kwargs)
# Product data
default_product = 'tp1'
MAX_TEST_PRODUCT = 3
PRODUCT_DATA = {
'tp1': {
'prefix': 'tp1',
'name': 'test product 1',
'description': 'desc for tp1',
},
'tp2': {
'prefix': 'tp2',
'name': 'test product 2',
'description': 'desc for tp2',
},
u'xü': {
'prefix': u'xü',
'name': 'Non-ASCII chars',
'description': 'Unicode chars in name',
},
u'Überflüssigkeit': {
'prefix': u'Überflüssigkeit',
'name': 'Non-ASCII chars (long)',
'description': 'Long name with unicode chars',
},
'Foo Bar': {
'prefix': 'Foo Bar',
'name': 'Whitespaces',
'description': 'Whitespace chars in name',
},
'Foo Bar#baz': {
'prefix': 'Foo Bar#baz',
'name': 'Non-alphanumeric',
'description': 'Special chars in name',
},
'pl/de': {
'prefix': 'pl/de',
'name': 'Path separator',
'description': 'URL path separator in name',
},
}
# Test setup
def _setup_test_env(self, create_folder=True, path=None, **kwargs):
r"""Prepare a new test environment .
Optionally set its path to a meaningful location (temp folder
if `path` is `None`).
"""
MultiProductSystem.FakePermClass = MockPerm
kwargs.setdefault('enable', ['trac.*', 'multiproduct.*'])
self.env = env = EnvironmentStub(**kwargs)
if create_folder:
if path is None:
env.path = tempfile.mkdtemp(prefix='bh-tempenv-')
else:
env.path = path
if not os.path.exists(env.path):
os.mkdir(env.path)
conf_dir = os.path.join(env.path, 'conf')
if not os.path.exists(conf_dir):
os.mkdir(conf_dir)
return env
    def _setup_test_log(self, env):
        r"""Route `env` logging to a DEBUG-level file under the
        environment path (creating the path if needed), enable SQL query
        logging, and register clean-up of the installed handler.
        """
        if not hasattr(env, 'path') or not env.path:
            # EnvironmentStub may not be folder-backed yet; give it a
            # folder so the log file has somewhere to live.
            env.path = tempfile.mkdtemp(prefix='bh-product-tempenv-')
        log_dir = os.path.join(env.path, 'log')
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)
        log_file = os.path.join(log_dir, 'trac-testing.log')
        config = env.config
        config.set('logging', 'log_file', log_file)
        config.set('logging', 'log_type', 'file')
        config.set('logging', 'log_level', 'DEBUG')
        # Log SQL queries
        config.set('trac', 'debug_sql', True)
        config.save()
        env.setup_log()
        # Marker line so each test case is delimited in the shared log.
        env.log.info('%s test case: %s %s', '-' * 10, self.id(), '-' * 10)

        # Clean-up logger instance and associated handler
        # Otherwise large test suites will only result in ERROR eventually
        # (at least in Unix systems) with messages
        #
        # TracError: Error reading '/path/to/file', make sure it is readable.
        # error: /path/to/: Too many open files
        self.addCleanup(self._teardown_test_log, env)
def _teardown_test_log(self, env):
if env.log and hasattr(env, '_log_handler'):
env.log.removeHandler(env._log_handler)
env._log_handler.flush()
env._log_handler.close()
del env._log_handler
@classmethod
def _load_product_from_data(cls, env, prefix):
r"""Ensure test product with prefix is loaded
"""
# TODO: Use fixtures implemented in #314
product_data = cls.PRODUCT_DATA[prefix]
prefix = to_unicode(prefix)
product = Product(env)
product._data.update(product_data)
product.insert()
    @classmethod
    def _upgrade_mp(cls, env):
        r"""Apply multi product upgrades

        Runs the multiproduct schema upgrade inside one transaction and
        then switches the environment to multi-product schema mode.
        """
        # Do not break wiki parser ( see #373 )
        EnvironmentStub.disable_component_in_config(env, TicketModule)
        EnvironmentStub.disable_component_in_config(env, ReportModule)
        mpsystem = MultiProductSystem(env)
        with env.db_transaction as db:
            try:
                mpsystem.upgrade_environment(db)
            except env.db_exc.OperationalError:
                # Database is upgraded, but database version was deleted.
                # Complete the upgrade by inserting default product.
                mpsystem._insert_default_product(db)
            finally:
                # Ensure that multiproduct DB version is set to latest value
                mpsystem._update_db_version(db, DB_VERSION)
        # assume that the database schema has been upgraded, enable
        # multi-product schema support in environment
        env.enable_multiproduct_schema(True)
@classmethod
def _load_default_data(cls, env):
r"""Initialize environment with default data by respecting
values set in system table.
"""
from trac import db_default
env.log.debug('Loading default data')
with env.db_transaction as db:
for table, cols, vals in db_default.get_data(db):
if table != 'system':
db.executemany('INSERT INTO %s (%s) VALUES (%s)'
% (table, ','.join(cols),
','.join(['%s' for c in cols])), vals)
env.log.debug('Loaded default data')
def _mp_setup(self, **kwargs):
"""Shortcut for quick product-aware environment setup.
"""
self.env = self._setup_test_env(**kwargs)
self._upgrade_mp(self.env)
self._setup_test_log(self.env)
self._load_product_from_data(self.env, self.default_product)
class ProductEnvTestCase(EnvironmentTestCase, MultiproductTestCase):
    r"""Test cases for Trac environments rewritten for product environments
    """

    # Test setup

    def setUp(self):
        r"""Replace Trac environment with product environment
        """
        EnvironmentTestCase.setUp(self)
        try:
            self.global_env = self.env
            self._setup_test_log(self.global_env)
            self._upgrade_mp(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            try:
                self.env = ProductEnvironment(self.global_env,
                                              self.default_product)
            # NOTE(review): bare except also swallows SystemExit /
            # KeyboardInterrupt here; consider `except Exception:`.
            except:
                # All tests should fail if anything goes wrong
                self.global_env.log.exception(
                    'Error creating product environment')
                self.env = None
        except:
            # Remove the environment folder before propagating the error.
            shutil.rmtree(self.env.path)
            raise

    def tearDown(self):
        # Discard product environment
        self.env = self.global_env
        EnvironmentTestCase.tearDown(self)
class ProductEnvApiTestCase(MultiproductTestCase):
    """Assertions for Apache(TM) Bloodhound product-specific extensions in
    [https://issues.apache.org/bloodhound/wiki/Proposals/BEP-0003 BEP 3]
    """
    def setUp(self):
        # Global stub environment plus a product environment on top of it.
        self._mp_setup()
        self.product_env = ProductEnvironment(self.env, self.default_product)

    def tearDown(self):
        # Release reference to transient environment mock object
        if self.env is not None:
            try:
                self.env.reset_db()
            except self.env.db_exc.OperationalError:
                # "Database not found ...",
                # "OperationalError: no such table: system" or the like
                pass
        self.env = None
        self.product_env = None

    def test_attr_forward_parent(self):
        """Testing env.__getattr__"""
        class EnvironmentAttrSandbox(EnvironmentStub):
            """Limit the impact of class edits so as to avoid race conditions
            """

        self.longMessage = True

        class AttrSuccess(Exception):
            """Exception raised when target method / property is actually
            invoked.
            """

        def property_mock(attrnm, expected_self):
            # Property that asserts access happens through `expected_self`
            # and then signals success by raising AttrSuccess.
            def assertAttrFwd(instance):
                self.assertIs(instance, expected_self,
                              "Mismatch in property '%s'" % (attrnm,))
                raise AttrSuccess

            return property(assertAttrFwd)

        self.env.__class__ = EnvironmentAttrSandbox
        try:
            # Attributes expected to be forwarded to the *global* env.
            # NOTE(review): 'get_templates_dir' appears twice in this list.
            for attrnm in 'system_info_providers secure_cookies ' \
                    'project_admin_trac_url get_system_info get_version ' \
                    'get_templates_dir get_templates_dir get_log_dir ' \
                    'backup'.split():
                original = getattr(Environment, attrnm)
                if isinstance(original, MethodType):
                    translation = getattr(self.product_env, attrnm)
                    self.assertIs(translation.im_self, self.env,
                                  "'%s' not bound to global env in product env"
                                  % (attrnm,))
                    self.assertIs(translation.im_func, original.im_func,
                                  "'%s' function differs in product env"
                                  % (attrnm,))
                elif isinstance(original, (property, Option)):
                    # Intercept property access e.g. properties, Option, ...
                    setattr(self.env.__class__, attrnm,
                            property_mock(attrnm, self.env))

                    self.exceptFailureMessage = 'Property %(attrnm)s'
                    with self.assertRaises(AttrSuccess) as cm_test_attr:
                        getattr(self.product_env, attrnm)
                else:
                    self.fail("Environment member %s has unexpected type"
                              % (repr(original),))
        finally:
            # Always restore the sandboxed class.
            self.env.__class__ = EnvironmentStub

        # Attributes expected to be bound to the *product* env itself.
        for attrnm in 'component_activated _component_rules ' \
                'enable_component get_known_users get_repository ' \
                '_component_name'.split():
            original = getattr(Environment, attrnm)
            if isinstance(original, MethodType):
                translation = getattr(self.product_env, attrnm)
                self.assertIs(translation.im_self, self.product_env,
                              "'%s' not bound to product env" % (attrnm,))
                self.assertIs(translation.im_func, original.im_func,
                              "'%s' function differs in product env"
                              % (attrnm,))
            elif isinstance(original, property):
                translation = getattr(ProductEnvironment, attrnm)
                self.assertIs(original, translation,
                              "'%s' property differs in product env"
                              % (attrnm,))

    def test_typecheck(self):
        """Testing env.__init__"""
        self._load_product_from_data(self.env, 'tp2')
        # Parametric singleton: the same prefix must resolve to the same
        # instance regardless of which environment is passed as parent.
        env2 = ProductEnvironment(self.product_env, 'tp2')
        self.assertIs(env2, ProductEnvironment(self.env, 'tp2'))

    def test_component_enable(self):
        """Testing env.is_component_enabled"""
        class C(Component):
            pass
        # Let's pretend this was declared elsewhere
        C.__module__ = 'dummy_module'
        sys.modules['dummy_module'] = sys.modules[__name__]

        global_env = self.env
        product_env = self.product_env

        def _test_component_enabled(cls):
            # Exercise all four enable/disable combinations of `cls`
            # across the global and product configuration scopes.
            cname = global_env._component_name(cls)
            disable_component_in_config = global_env.disable_component_in_config
            enable_component_in_config = global_env.enable_component_in_config

            # cls initially disabled in both envs
            disable_component_in_config(global_env, cls)
            disable_component_in_config(product_env, cls)

            expected_rules = {
                'multiproduct': True,
                'trac': True,
                'trac.ticket.report.reportmodule': False,
                'trac.ticket.web_ui.ticketmodule': False,
                'trac.db': True,
                cname: False,
            }
            self.assertEquals(expected_rules, global_env._component_rules)
            self.assertEquals(expected_rules, product_env._component_rules)

            self.assertFalse(global_env.is_component_enabled(cls))
            self.assertFalse(product_env.is_component_enabled_local(cls))
            self.assertIs(global_env[cls], None)
            self.assertIs(product_env[cls], None)

            # cls enabled in product env but not in global env
            disable_component_in_config(global_env, cls)
            enable_component_in_config(product_env, cls)

            expected_rules[cname] = False
            self.assertEquals(expected_rules, global_env._component_rules)
            expected_rules[cname] = True
            self.assertEquals(expected_rules, product_env._component_rules)

            self.assertFalse(global_env.is_component_enabled(cls))
            self.assertTrue(product_env.is_component_enabled_local(cls))
            # Product-local enabling is not sufficient while globally off.
            self.assertIs(global_env[cls], None)
            self.assertIs(product_env[cls], None)

            # cls enabled in both envs
            enable_component_in_config(global_env, cls)
            enable_component_in_config(product_env, cls)

            expected_rules[cname] = True
            self.assertEquals(expected_rules, global_env._component_rules)
            expected_rules[cname] = True
            self.assertEquals(expected_rules, product_env._component_rules)

            self.assertTrue(global_env.is_component_enabled(cls))
            self.assertTrue(product_env.is_component_enabled_local(cls))
            self.assertIsNot(global_env[cls], None)
            self.assertIsNot(product_env[cls], None)

            # cls enabled in global env but not in product env
            enable_component_in_config(global_env, cls)
            disable_component_in_config(product_env, cls)

            expected_rules[cname] = True
            self.assertEquals(expected_rules, global_env._component_rules)
            expected_rules[cname] = False
            self.assertEquals(expected_rules, product_env._component_rules)

            self.assertTrue(global_env.is_component_enabled(cls))
            self.assertFalse(product_env.is_component_enabled_local(cls))
            self.assertIsNot(global_env[cls], None)
            self.assertIs(product_env[cls], None)

        # Test the rules against custom , external component
        _test_component_enabled(C)
        for env in (global_env, product_env):
            env.config.remove('components', env._component_name(C))
        # Test the rules against Trac component class
        _test_component_enabled(TicketModule)

        # ComponentMeta._components is shared between multiple tests.
        # Unregister class C as its fake module might break something else.
        ComponentMeta._components.remove(C)

    def test_href_is_lazy(self):
        # Repeated access must return the same cached object.
        href = self.product_env.href
        self.assertIs(href, self.product_env.href)

    def test_abs_href_is_lazy(self):
        abs_href = self.product_env.abs_href
        self.assertIs(abs_href, self.product_env.abs_href)

    def test_path_is_lazy(self):
        path = self.product_env.path
        self.assertIs(path, self.product_env.path)

    def test_path(self):
        """Testing env.path"""
        # Product environments live under <global env path>/products/<prefix>
        self.assertEqual(self.product_env.path,
                         os.path.join(self.env.path, 'products',
                                      self.default_product))

    def test_env_config_inheritance(self):
        """Testing env.config"""
        global_config = self.env.config
        product_config = self.product_env.config

        # By default inherit global settings ...
        global_config['section'].set('key', 'value1')
        self.assertEquals('value1', global_config['section'].get('key'))
        self.assertEquals('value1', product_config['section'].get('key'))

        # ... but allow for overrides in product scope
        product_config['section'].set('key', 'value2')
        self.assertEquals('value1', global_config['section'].get('key'))
        self.assertEquals('value2', product_config['section'].get('key'))

    def test_parametric_singleton(self):
        # Default product resolves to the instance created in setUp.
        self.assertIs(self.product_env,
                      ProductEnvironment(self.env, self.default_product))

        for prefix in self.PRODUCT_DATA:
            if prefix != self.default_product:
                self._load_product_from_data(self.env, prefix)
        envgen1 = dict([prefix, ProductEnvironment(self.env, prefix)]
                       for prefix in self.PRODUCT_DATA)
        envgen2 = dict([prefix, ProductEnvironment(self.env, prefix)]
                       for prefix in self.PRODUCT_DATA)

        for prefix, env1 in envgen1.iteritems():
            self.assertIs(env1, envgen2[prefix],
                          "Identity check (by prefix) '%s'" % (prefix,))

        # NOTE(review): this loop duplicates the previous one verbatim.
        for prefix, env1 in envgen1.iteritems():
            self.assertIs(env1, envgen2[prefix],
                          "Identity check (by prefix) '%s'" % (prefix,))

        def load_product(prefix):
            # Fetch the Product model object for `prefix` from the DB.
            products = Product.select(self.env, where={'prefix' : prefix})
            if not products:
                raise LookupError('Missing product %s' % (prefix,))
            else:
                return products[0]

        envgen3 = dict([prefix, ProductEnvironment(self.env,
                                                   load_product(prefix))]
                       for prefix in self.PRODUCT_DATA)

        for prefix, env1 in envgen1.iteritems():
            self.assertIs(env1, envgen3[prefix],
                          "Identity check (by product model) '%s'" % (prefix,))
class ProductEnvHrefTestCase(MultiproductTestCase):
    """Assertions for resolution of product environment's base URL
    [https://issues.apache.org/bloodhound/wiki/Proposals/BEP-0003 BEP 3]
    """

    def product_base_url(url_template):
        # Decorator factory (re-bound as a staticmethod at the bottom of
        # the class body) that tags a test method with the
        # [multiproduct] product_base_url value setUp should apply.
        def decorator(f):
            f.product_base_url = url_template
            return f
        return decorator

    def setUp(self):
        self._mp_setup()
        self.env.abs_href = Href('http://globalenv.com/trac.cgi')
        # Read the URL pattern attached by @product_base_url off the
        # function object of the test method about to run (Python 2
        # im_func access on the bound method).
        url_pattern = getattr(getattr(self, self._testMethodName).im_func,
                              'product_base_url', '')
        self.env.config.set('multiproduct', 'product_base_url', url_pattern)
        self.env.config.set('trac', 'base_url', 'http://globalenv.com/trac.cgi')
        self.product_env = ProductEnvironment(self.env, self.default_product)

    def tearDown(self):
        # Remove the temporary folder created by _mp_setup.
        shutil.rmtree(os.path.dirname(self.env.path), ignore_errors=True)
        # Release reference to transient environment mock object
        if self.env is not None:
            try:
                self.env.reset_db()
            except self.env.db_exc.OperationalError:
                # "Database not found ...",
                # "OperationalError: no such table: system" or the like
                pass
        self.env = None
        self.product_env = None

    @product_base_url('http://$(prefix)s.domain.tld/')
    def test_href_subdomain(self):
        """Test product sub domain base URL
        """
        self.assertEqual('/', self.product_env.href())
        self.assertEqual('http://tp1.domain.tld', self.product_env.abs_href())

    @product_base_url('/path/to/bloodhound/$(prefix)s')
    def test_href_sibling_paths(self):
        """Test product base URL at sibling paths
        """
        self.assertEqual('/trac.cgi/path/to/bloodhound/tp1',
                         self.product_env.href())
        self.assertEqual('http://globalenv.com/trac.cgi/path/to/bloodhound/tp1',
                         self.product_env.abs_href())

    @product_base_url('/$(envname)s/$(prefix)s')
    def test_href_inherit_sibling_paths(self):
        """Test product base URL at sibling paths inheriting configuration.
        """
        self.assertEqual('/trac.cgi/%s/tp1'
                         % os.path.split(self.env.path)[-1],
                         self.product_env.href())
        self.assertEqual('http://globalenv.com/trac.cgi/%s/tp1'
                         % os.path.split(self.env.path)[-1],
                         self.product_env.abs_href())

    @product_base_url('')
    def test_href_default(self):
        """Test product base URL is to a default
        """
        self.assertEqual('/trac.cgi/products/tp1', self.product_env.href())
        self.assertEqual('http://globalenv.com/trac.cgi/products/tp1',
                         self.product_env.abs_href())

    @product_base_url('/products/$(prefix)s')
    def test_href_embed(self):
        """Test default product base URL /products/prefix
        """
        self.assertEqual('/trac.cgi/products/tp1', self.product_env.href())
        self.assertEqual('http://globalenv.com/trac.cgi/products/tp1',
                         self.product_env.abs_href())

    @product_base_url('http://$(envname)s.tld/bh/$(prefix)s')
    def test_href_complex(self):
        """Test complex product base URL
        """
        self.assertEqual('/bh/tp1', self.product_env.href())
        self.assertEqual('http://%s.tld/bh/tp1'
                         % os.path.split(self.env.path)[-1],
                         self.product_env.abs_href())

    @product_base_url('http://$(prefix)s.$(envname)s.tld/')
    def test_product_href_uses_multiproduct_product_base_url(self):
        """Test that [multiproduct] product_base_url is used to compute
        abs_href for the product environment when [trac] base_url for
        the product environment is an empty string (the default).
        """
        # Global URLs
        self.assertEqual('http://globalenv.com/trac.cgi', self.env.base_url)
        self.assertEqual('/trac.cgi', self.env.href())
        self.assertEqual('http://globalenv.com/trac.cgi', self.env.abs_href())
        # Product URLs
        self.assertEqual('', self.product_env.base_url)
        self.assertEqual('/', self.product_env.href())
        self.assertEqual('http://tp1.%s.tld'
                         % os.path.split(self.env.path)[-1],
                         self.product_env.abs_href())

    @product_base_url('http://$(prefix)s.$(envname)s.tld/')
    def test_product_href_uses_products_base_url(self):
        """Test that [trac] base_url for the product environment is used to
        compute abs_href for the product environment when [trac] base_url
        for the product environment is different than [trac] base_url for
        the global environment.
        """
        self.product_env.config.set('trac', 'base_url', 'http://productenv.com')
        self.product_env.config.save()
        self.assertEqual('http://productenv.com', self.product_env.base_url)
        self.assertEqual('/', self.product_env.href())
        self.assertEqual('http://productenv.com', self.product_env.abs_href())

    @product_base_url('http://$(prefix)s.$(envname)s.tld/')
    def test_product_href_global_and_product_base_urls_same(self):
        """Test that [multiproduct] product_base_url is used to compute
        abs_href for the product environment when [trac] base_url is the same
        for the product and global environment.
        """
        self.product_env.config.set('trac', 'base_url',
                                    self.env.config.get('trac', 'base_url'))
        self.product_env.config.save()
        self.assertEqual('', self.product_env.base_url)
        self.assertEqual('/', self.product_env.href())
        self.assertEqual('http://tp1.%s.tld'
                         % os.path.split(self.env.path)[-1],
                         self.product_env.abs_href())

    # Re-bind the decorator as a staticmethod so it remains usable as a
    # class attribute (a plain function would become an unbound method
    # in Python 2).
    product_base_url = staticmethod(product_base_url)
class ProductEnvConfigTestCase(MultiproductTestCase):
"""Test cases for product environment's configuration
"""
    class DummyAdminCommand(Component):
        """Dummy class used for testing purposes
        """
        implements(IAdminCommandProvider)

        class DummyException(Exception):
            pass

        def do_fail(self, *args):
            # NOTE(review): DummyException is defined in the enclosing
            # class scope, which is not visible from method bodies; this
            # name resolves against module globals at call time — confirm
            # a module-level DummyException exists or that do_fail is
            # never actually invoked by the tests.
            raise DummyException(args)

        def get_admin_commands(self):
            # (command, usage, short help, tab-completion, handler)
            yield "fail", "[ARG]...", "Always fail", None, self.do_fail
def setUp(self):
self._mp_setup(create_folder=True)
self.global_env = self.env
self.env = ProductEnvironment(self.global_env, self.default_product)
# Random component class
self.component_class = self.DummyAdminCommand
def tearDown(self):
if self.global_env is not None:
try:
self.global_env.reset_db()
except self.global_env.db_exc.OperationalError:
# "Database not found ...",
# "OperationalError: no such table: system" or the like
pass
shutil.rmtree(self.env.path)
self.env = self.global_env = None
    def test_regression_bh_539(self):
        """A component disabled in the product scope must not appear among
        admin command providers; enabling it in both the global and the
        product scope must expose it (regression test for Bloodhound #539).
        """
        tracadmin = AdminCommandManager(self.env)
        # Component disabled everywhere: no instance, no provider, no help.
        self.assertTrue(self.env[self.component_class] is None,
                        "Expected component disabled")
        self.assertFalse(any(isinstance(c, self.component_class)
                             for c in tracadmin.providers),
                         "Component erroneously listed in admin cmd providers")
        self.assertEqual([], tracadmin.get_command_help(args=['fail']))

        # Enable component in both global and product context
        cmd_args = ['config', 'set', 'components', __name__ + '.*', 'enabled']
        AdminCommandManager(self.global_env).execute_command(*cmd_args)
        tracadmin.execute_command(*cmd_args)

        self.assertTrue(self.env[self.component_class] is not None,
                        "Expected component enabled")
        self.assertTrue(any(isinstance(c, self.component_class)
                            for c in tracadmin.providers),
                        "Component not listed in admin cmd providers")
        self.assertEqual(1, len(tracadmin.get_command_help(args=['fail'])))
def test_regression_bh_539_concurrent(self):
try:
# It is necessary to load another environment object to work around
# ProductEnvironment class' parametric singleton constraint
old_env = self.env
# In-memory DB has to be shared
self.global_env.__class__.global_databasemanager = \
self.env.global_databasemanager
new_global_env = self._setup_test_env(create_folder=True,
path=self.global_env.path)
self.env = old_env
self._setup_test_log(new_global_env)
# FIXME: EnvironmentStub config is not bound to a real file
# ... so let's reuse one config for both envs to simulate that they
# are in sync, a condition verified in another test case
new_global_env.config = self.global_env.config
new_env = ProductEnvironment(new_global_env, self.default_product)
self.assertTrue(new_global_env is not self.global_env)
self.assertTrue(new_env is not self.env)
self.assertEqual(self.env.path, new_env.path)
self.assertEqual(self.env.config._lock_path,
new_env.config._lock_path)
tracadmin = AdminCommandManager(self.env)
new_tracadmin = AdminCommandManager(new_env)
# Assertions for self.env
self.assertTrue(self.env[self.component_class] is None,
"Expected component disabled")
self.assertFalse(any(isinstance(c, self.component_class)
for c in tracadmin.providers),
"Component erroneously listed in admin cmd "
"providers")
self.assertEqual([], tracadmin.get_command_help(args=['fail']))
# Repeat assertions for new_env
self.assertTrue(new_env[self.component_class] is None,
"Expected component disabled")
self.assertFalse(any(isinstance(c, self.component_class)
for c in new_tracadmin.providers),
"Component erroneously listed in admin cmd "
"providers")
self.assertEqual([], new_tracadmin.get_command_help(args=['fail']))
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/db/api.py | bloodhound_multiproduct/tests/db/api.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from trac.db.tests.api import ParseConnectionStringTestCase, StringsTestCase, ConnectionTestCase, WithTransactionTest
from tests.db.util import ProductEnvMixin
class ProductParseConnectionStringTestCase(ParseConnectionStringTestCase, ProductEnvMixin):
    """Re-run Trac's connection-string parsing tests inside a product
    environment (supplied by `ProductEnvMixin`)."""
    pass
class ProductStringsTestCase(StringsTestCase, ProductEnvMixin):
    """Re-run Trac's DB string handling tests inside a product
    environment (supplied by `ProductEnvMixin`)."""
    pass
class ProductConnectionTestCase(ConnectionTestCase, ProductEnvMixin):
    """Re-run Trac's DB connection tests inside a product environment
    (supplied by `ProductEnvMixin`)."""
    pass
class ProductWithTransactionTestCase(WithTransactionTest, ProductEnvMixin):
    """Re-run Trac's transaction decorator tests inside a product
    environment (supplied by `ProductEnvMixin`)."""
    pass
def suite():
    """Aggregate the upstream Trac DB API test cases and their product
    environment counterparts into a single test suite.
    """
    test_cases = (
        ParseConnectionStringTestCase,
        StringsTestCase,
        ConnectionTestCase,
        WithTransactionTest,
        ProductParseConnectionStringTestCase,
        ProductStringsTestCase,
        ProductConnectionTestCase,
        ProductWithTransactionTestCase,
    )
    result = unittest.TestSuite()
    for case in test_cases:
        result.addTest(unittest.makeSuite(case, 'test'))
    return result
if __name__ == '__main__':
    # Allow running this module stand-alone: python tests/db/api.py
    unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/db/cursor.py | bloodhound_multiproduct/tests/db/cursor.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for multiproduct/dbcursor.py"""
import unittest
from multiproduct.dbcursor import BloodhoundProductSQLTranslate, SKIP_TABLES, TRANSLATE_TABLES, PRODUCT_COLUMN
# Test case data, each section consists of list of tuples of original and correctly translated SQL statements
data = {
# non-translated SELECTs
'system_select_nontranslated' : [
(
"""SELECT id,
name,
value
FROM repository
WHERE name IN ('alias',
'description',
'dir',
'hidden',
'name',
'type',
'url')""",
"""SELECT id,
name,
value
FROM repository
WHERE name IN ('alias',
'description',
'dir',
'hidden',
'name',
'type',
'url')"""
),
],
# translated SELECTs
'system_select_translated' : [
(
"""SELECT TYPE, id,
filename,
time,
description,
author
FROM attachment
WHERE time > %s
AND time < %s
AND TYPE = %s""",
"""SELECT TYPE, id,
filename,
time,
description,
author
FROM (SELECT * FROM attachment WHERE product='PRODUCT') AS attachment
WHERE time > %s
AND time < %s
AND TYPE = %s"""
),
(
"""SELECT name,
due,
completed,
description
FROM milestone
WHERE name=%s""",
"""SELECT name,
due,
completed,
description
FROM (SELECT * FROM milestone WHERE product='PRODUCT') AS milestone
WHERE name=%s"""
),
(
"""SELECT COALESCE(component, ''),
count(COALESCE(component, ''))
FROM ticket
GROUP BY COALESCE(component, '')""",
"""SELECT COALESCE(component, ''),
count(COALESCE(component, ''))
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
GROUP BY COALESCE(component, '')"""
),
(
"""SELECT id, time, reporter, TYPE, summary,
description
FROM ticket
WHERE time>=%s
AND time<=%s""",
"""SELECT id, time, reporter, TYPE, summary,
description
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
WHERE time>=%s
AND time<=%s"""
),
(
"""SELECT t.id,
tc.time,
tc.author,
t.type,
t.summary,
tc.field,
tc.oldvalue,
tc.newvalue
FROM ticket_change tc
INNER JOIN ticket t ON t.id = tc.ticket
AND tc.time>=1351375199999999
AND tc.time<=1354057199999999
ORDER BY tc.time""",
"""SELECT t.id,
tc.time,
tc.author,
t.type,
t.summary,
tc.field,
tc.oldvalue,
tc.newvalue
FROM (SELECT * FROM ticket_change WHERE product='PRODUCT') AS tc
INNER JOIN (SELECT * FROM ticket WHERE product='PRODUCT') AS t ON t.id = tc.ticket
AND tc.time>=1351375199999999
AND tc.time<=1354057199999999
ORDER BY tc.time"""
),
(
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,CAST(priority.value AS integer) DESC,t.id) AS x""",
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN (SELECT * FROM milestone WHERE product='PRODUCT') AS milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,CAST(priority.value AS integer) DESC,t.id) AS x"""
),
(
"""SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,
CAST(priority.value AS integer) DESC,t.id""",
"""SELECT t.id AS id,
t.summary AS summary,
t.owner AS OWNER,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN (SELECT * FROM milestone WHERE product='PRODUCT') AS milestone ON (milestone.name=milestone)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(t.milestone,'')='',
COALESCE(milestone.completed,0)=0,
milestone.completed,
COALESCE(milestone.due,0)=0,
milestone.due,
t.milestone,
COALESCE(priority.value,'')='' DESC,
CAST(priority.value AS integer) DESC,t.id"""
),
(
"""SELECT COUNT(*)
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab""",
"""SELECT COUNT(*)
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab"""
),
(
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS x""",
"""SELECT COUNT(*)
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS x"""
),
(
"""SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id""",
"""SELECT t.id AS id,
t.summary AS summary,
t.status AS status,
t.type AS TYPE,
t.priority AS priority,
t.product AS product,
t.milestone AS milestone,
t.time AS time,
t.changetime AS changetime,
t.owner AS OWNER,
priority.value AS priority_value
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT OUTER JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS priority ON (priority.TYPE='priority'
AND priority.name=priority)
WHERE ((COALESCE(t.status,'')!=%s)
AND (COALESCE(t.OWNER,'')=%s))
ORDER BY COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id"""
),
(
"""SELECT *
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab LIMIT 1""",
"""SELECT *
FROM
(SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time ) AS tab LIMIT 1"""
),
(
"""SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time""",
"""SELECT p.value AS __color__, id AS ticket, summary, component, VERSION, milestone, t.type AS TYPE, OWNER, status,
time AS created,
changetime AS _changetime,
description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority
AND p.TYPE = 'priority'
WHERE status <> 'closed'
ORDER BY CAST(p.value AS integer),
milestone,
t.TYPE, time"""
),
(
"""SELECT COALESCE(version, '') ,
count(COALESCE(version, ''))
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS owner,
t.type AS type,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.version AS version,
t.time AS time,
t.changetime AS changetime,
t.product AS product,
priority.value AS priority_value
FROM
(SELECT *
FROM ticket
WHERE product="default") AS t
LEFT OUTER JOIN
(SELECT *
FROM enum
WHERE product="default") AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN
(SELECT *
FROM version
WHERE product="default") AS version ON (version.name=version)
WHERE ((COALESCE(t.product,'')='default'))
ORDER BY COALESCE(t.version,'')='',
COALESCE(version.time,0)=0,version.time,
t.version,COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS foo
GROUP BY COALESCE(version, '')""",
"""SELECT COALESCE(version, '') ,
count(COALESCE(version, ''))
FROM
(SELECT t.id AS id,
t.summary AS summary,
t.owner AS owner,
t.type AS type,
t.status AS status,
t.priority AS priority,
t.milestone AS milestone,
t.version AS version,
t.time AS time,
t.changetime AS changetime,
t.product AS product,
priority.value AS priority_value
FROM
(SELECT *
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket
WHERE product="default") AS t
LEFT OUTER JOIN
(SELECT *
FROM (SELECT * FROM enum WHERE product='PRODUCT') AS enum
WHERE product="default") AS priority ON (priority.type='priority'
AND priority.name=priority)
LEFT OUTER JOIN
(SELECT *
FROM (SELECT * FROM version WHERE product='PRODUCT') AS version
WHERE product="default") AS version ON (version.name=version)
WHERE ((COALESCE(t.product,'')='default'))
ORDER BY COALESCE(t.version,'')='',
COALESCE(version.time,0)=0,version.time,
t.version,COALESCE(priority.value,'')='',
CAST(priority.value AS integer),
t.id) AS foo
GROUP BY COALESCE(version, '')"""
),
(
"""SELECT w1.name, w1.time, w1.author, w1.text
FROM wiki w1,(SELECT name, max(version) AS ver
FROM wiki GROUP BY name) w2
WHERE w1.version = w2.ver AND w1.name = w2.name
AND (w1.name LIKE %s ESCAPE '/' OR w1.author LIKE %s ESCAPE '/' OR w1.text LIKE %s ESCAPE '/')""",
"""SELECT w1.name, w1.time, w1.author, w1.text
FROM (SELECT * FROM wiki WHERE product='PRODUCT') AS w1,(SELECT name, max(version) AS ver
FROM (SELECT * FROM wiki WHERE product='PRODUCT') AS wiki GROUP BY name) AS w2
WHERE w1.version = w2.ver AND w1.name = w2.name
AND (w1.name LIKE %s ESCAPE '/' OR w1.author LIKE %s ESCAPE '/' OR w1.text LIKE %s ESCAPE '/')"""
),
(
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords)
SELECT id, 'defect', time, changetime, component, severity, priority, owner,
reporter, cc, version, milestone, status, resolution, summary,
description, keywords FROM ticket_old
WHERE COALESCE(severity,'') <> 'enhancement'""",
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords, product)
SELECT id, 'defect', time, changetime, component, severity, priority, owner,
reporter, cc, version, milestone, status, resolution, summary,
description, keywords, 'PRODUCT' FROM (SELECT * FROM "PRODUCT_ticket_old") AS ticket_old
WHERE COALESCE(severity,'') <> 'enhancement'"""
),
(
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords)
SELECT id, 'enhancement', time, changetime, component, 'normal', priority,
owner, reporter, cc, version, milestone, status, resolution, summary,
description, keywords FROM ticket_old
WHERE severity = 'enhancement'""",
"""INSERT INTO ticket(id, type, time, changetime, component, severity, priority,
owner, reporter, cc, version, milestone, status, resolution,
summary, description, keywords, product)
SELECT id, 'enhancement', time, changetime, component, 'normal', priority,
owner, reporter, cc, version, milestone, status, resolution, summary,
description, keywords, 'PRODUCT' FROM (SELECT * FROM "PRODUCT_ticket_old") AS ticket_old
WHERE severity = 'enhancement'"""
),
(
"""SELECT COUNT(*) FROM (
SELECT __color__, __group,
(CASE
WHEN __group = 1 THEN 'Accepted'
WHEN __group = 2 THEN 'Owned'
WHEN __group = 3 THEN 'Reported'
ELSE 'Commented' END) AS __group__,
ticket, summary, component, version, milestone,
type, priority, created, _changetime, _description,
_reporter
FROM (
SELECT DISTINCT CAST(p.value AS integer) AS __color__,
(CASE
WHEN owner = %s AND status = 'accepted' THEN 1
WHEN owner = %s THEN 2
WHEN reporter = %s THEN 3
ELSE 4 END) AS __group,
t.id AS ticket, summary, component, version, milestone,
t.type AS type, priority, t.time AS created,
t.changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM ticket t
LEFT JOIN enum p ON p.name = t.priority AND p.type = 'priority'
LEFT JOIN ticket_change tc ON tc.ticket = t.id AND tc.author = %s
AND tc.field = 'comment'
WHERE t.status <> 'closed'
AND (owner = %s OR reporter = %s OR author = %s)
) AS sub
ORDER BY __group, __color__, milestone, type, created
) AS tab""",
"""SELECT COUNT(*) FROM (
SELECT __color__, __group,
(CASE
WHEN __group = 1 THEN 'Accepted'
WHEN __group = 2 THEN 'Owned'
WHEN __group = 3 THEN 'Reported'
ELSE 'Commented' END) AS __group__,
ticket, summary, component, version, milestone,
type, priority, created, _changetime, _description,
_reporter
FROM (
SELECT DISTINCT CAST(p.value AS integer) AS __color__,
(CASE
WHEN owner = %s AND status = 'accepted' THEN 1
WHEN owner = %s THEN 2
WHEN reporter = %s THEN 3
ELSE 4 END) AS __group,
t.id AS ticket, summary, component, version, milestone,
t.type AS type, priority, t.time AS created,
t.changetime AS _changetime, description AS _description,
reporter AS _reporter
FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS t
LEFT JOIN (SELECT * FROM enum WHERE product='PRODUCT') AS p ON p.name = t.priority AND p.type = 'priority'
LEFT JOIN (SELECT * FROM ticket_change WHERE product='PRODUCT') AS tc ON tc.ticket = t.id AND tc.author = %s
AND tc.field = 'comment'
WHERE t.status <> 'closed'
AND (owner = %s OR reporter = %s OR author = %s)
) AS sub
ORDER BY __group, __color__, milestone, type, created
) AS tab"""
),
],
# custom table SELECTs
'custom_select' : [
(
"""SELECT bklg_id, count(*) as total
FROM backlog_ticket
WHERE tkt_order IS NULL OR tkt_order > -1
GROUP BY bklg_id
""",
"""SELECT bklg_id, count(*) as total
FROM (SELECT * FROM "PRODUCT_backlog_ticket") AS backlog_ticket
WHERE tkt_order IS NULL OR tkt_order > -1
GROUP BY bklg_id
"""
),
(
"""SELECT bt.bklg_id, t.status, count(*) as total
FROM backlog_ticket bt, ticket t
WHERE t.id = bt.tkt_id
AND (bt.tkt_order IS NULL OR bt.tkt_order > -1)
GROUP BY bklg_id, status""",
"""SELECT bt.bklg_id, t.status, count(*) as total
FROM (SELECT * FROM "PRODUCT_backlog_ticket") AS bt, (SELECT * FROM ticket WHERE product='PRODUCT') AS t
WHERE t.id = bt.tkt_id
AND (bt.tkt_order IS NULL OR bt.tkt_order > -1)
GROUP BY bklg_id, status"""
),
],
# non-translated INSERTs
'system_insert_nontranslated' : [
(
"""INSERT INTO session VALUES (%s,%s,0)""",
"""INSERT INTO session VALUES (%s,%s,0)"""
),
],
# translated INSERTs
'system_insert_translated' : [
(
"""INSERT INTO ticket_custom (ticket, name, value)
SELECT id, 'totalhours', '0' FROM ticket WHERE id NOT IN (
SELECT ticket from ticket_custom WHERE name='totalhours'
)""",
"""INSERT INTO ticket_custom (ticket, name, value, product)
SELECT id, 'totalhours', '0', 'PRODUCT' FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket WHERE id NOT IN (
SELECT ticket from (SELECT * FROM ticket_custom WHERE product='PRODUCT') AS ticket_custom WHERE name='totalhours'
)"""
),
(
"""INSERT INTO ticket_custom (ticket, name, value)
SELECT id, 'totalhours', '0' FROM ticket WHERE id NOT IN (
SELECT ticket from ticket_custom WHERE name='totalhours')""",
"""INSERT INTO ticket_custom (ticket, name, value, product)
SELECT id, 'totalhours', '0', 'PRODUCT' FROM (SELECT * FROM ticket WHERE product='PRODUCT') AS ticket WHERE id NOT IN (
SELECT ticket from (SELECT * FROM ticket_custom WHERE product='PRODUCT') AS ticket_custom WHERE name='totalhours')"""
),
(
"""INSERT INTO session (sid, last_visit, authenticated)
SELECT distinct s.sid,COALESCE(%s,0),s.authenticated
FROM session_old AS s LEFT JOIN session_old AS s2
ON (s.sid=s2.sid AND s2.var_name='last_visit')
WHERE s.sid IS NOT NULL""",
"""INSERT INTO session (sid, last_visit, authenticated)
SELECT distinct s.sid,COALESCE(%s,0),s.authenticated
FROM (SELECT * FROM "PRODUCT_session_old") AS s LEFT JOIN (SELECT * FROM "PRODUCT_session_old") AS s2
ON (s.sid=s2.sid AND s2.var_name='last_visit')
WHERE s.sid IS NOT NULL"""
),
(
"""INSERT INTO session_attribute (sid, authenticated, name, value)
SELECT s.sid, s.authenticated, s.var_name, s.var_value
FROM session_old s
WHERE s.var_name <> 'last_visit' AND s.sid IS NOT NULL""",
"""INSERT INTO session_attribute (sid, authenticated, name, value)
SELECT s.sid, s.authenticated, s.var_name, s.var_value
FROM (SELECT * FROM "PRODUCT_session_old") AS s
WHERE s.var_name <> 'last_visit' AND s.sid IS NOT NULL"""
),
(
"""INSERT INTO wiki(version, name, time, author, ipnr, text)
SELECT 1 + COALESCE(max(version), 0), %s, %s, 'trac',
'127.0.0.1', %s FROM wiki WHERE name=%s""",
"""INSERT INTO wiki(version, name, time, author, ipnr, text, product)
SELECT 1 + COALESCE(max(version), 0), %s, %s, 'trac',
'127.0.0.1', %s, 'PRODUCT' FROM (SELECT * FROM wiki WHERE product='PRODUCT') AS wiki WHERE name=%s"""
),
(
"""INSERT INTO permission VALUES ('dev','WIKI_VIEW')""",
"""INSERT INTO permission VALUES ('dev','WIKI_VIEW','PRODUCT')"""
),
(
"""INSERT INTO permission (username, action) VALUES ('dev','WIKI_VIEW')""",
"""INSERT INTO permission (username, action, product) VALUES ('dev','WIKI_VIEW','PRODUCT')"""
),
],
'custom_insert' : [
(
"""INSERT INTO node_change (rev,path,kind,change,base_path,base_rev)
SELECT rev,path,kind,change,base_path,base_rev FROM node_change_old""",
"""INSERT INTO node_change (rev,path,kind,change,base_path,base_rev)
SELECT rev,path,kind,change,base_path,base_rev FROM (SELECT * FROM "PRODUCT_node_change_old") AS node_change_old"""
),
],
# translated UPDATEs
'system_update_translated' : [
(
"""UPDATE ticket SET changetime=%s WHERE id=%s""",
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/db/util.py | bloodhound_multiproduct/tests/db/util.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductEnvMixin(MultiproductTestCase):
def setUp(self):
self._mp_setup()
self.global_env = self.env
self.env = ProductEnvironment(self.global_env, self.default_product)
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/db/mysql.py | bloodhound_multiproduct/tests/db/mysql.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from trac.db.tests.mysql_test import MySQLTableAlterationSQLTest
from tests.db.util import ProductEnvMixin
class ProductMySQLTableAlterationSQLTest(MySQLTableAlterationSQLTest, ProductEnvMixin):
pass
def suite():
suite = unittest.TestSuite([
unittest.makeSuite(MySQLTableAlterationSQLTest, 'test'),
unittest.makeSuite(ProductMySQLTableAlterationSQLTest, 'test'),
])
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/db/postgres.py | bloodhound_multiproduct/tests/db/postgres.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from trac.db.tests.postgres_test import PostgresTableCreationSQLTest, PostgresTableAlterationSQLTest
from tests.db.util import ProductEnvMixin
class ProductPostgresTableCreationSQLTest(PostgresTableCreationSQLTest, ProductEnvMixin):
pass
class ProductPostgresTableAlterationSQLTest(PostgresTableAlterationSQLTest, ProductEnvMixin):
pass
def suite():
suite = unittest.TestSuite([
unittest.makeSuite(PostgresTableCreationSQLTest, 'test'),
unittest.makeSuite(PostgresTableAlterationSQLTest, 'test'),
unittest.makeSuite(ProductPostgresTableCreationSQLTest, 'test'),
unittest.makeSuite(ProductPostgresTableAlterationSQLTest, 'test'),
])
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/db/__init__.py | bloodhound_multiproduct/tests/db/__init__.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests import TestLoader
def test_suite():
return TestLoader().discover_package(__package__, pattern='*.py')
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/wikisyntax.py | bloodhound_multiproduct/tests/ticket/wikisyntax.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for inherited Apache(TM) Bloodhound ticket wiki syntax
in product environments"""
import os.path
import re
import unittest
from trac.ticket.tests import wikisyntax
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
from tests.wiki import formatter
def test_suite():
suite = unittest.TestSuite()
suite.addTest(formatter.test_suite(wikisyntax.TICKET_TEST_CASES,
wikisyntax.ticket_setup,
wikisyntax.__file__,
wikisyntax.ticket_teardown))
suite.addTest(formatter.test_suite(wikisyntax.REPORT_TEST_CASES,
wikisyntax.report_setup,
wikisyntax.__file__))
suite.addTest(formatter.test_suite(wikisyntax.MILESTONE_TEST_CASES,
wikisyntax.milestone_setup,
wikisyntax.__file__,
wikisyntax.milestone_teardown))
suite.addTest(formatter.test_suite(wikisyntax.QUERY_TEST_CASES,
wikisyntax.ticket_setup,
wikisyntax.__file__,
wikisyntax.ticket_teardown))
suite.addTest(formatter.test_suite(wikisyntax.QUERY2_TEST_CASES,
wikisyntax.query2_setup,
wikisyntax.__file__,
wikisyntax.query2_teardown))
suite.addTest(formatter.test_suite(wikisyntax.COMMENT_TEST_CASES,
file=wikisyntax.__file__))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/report.py | bloodhound_multiproduct/tests/ticket/report.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's ticket reports in product environments"""
import unittest
from trac.ticket.tests.report import ReportTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductReportTestCase(ReportTestCase, MultiproductTestCase):
@property
def env(self):
env = getattr(self, '_env', None)
if env is None:
self.global_env = self._setup_test_env()
self._upgrade_mp(self.global_env)
self._setup_test_log(self.global_env)
self._load_product_from_data(self.global_env, self.default_product)
self._env = env = ProductEnvironment(
self.global_env, self.default_product)
return env
@env.setter
def env(self, value):
pass
def tearDown(self):
self.global_env.reset_db()
self.global_env = self._env = None
def test_suite():
return unittest.TestSuite([
# TODO : Put report doctests in product context
# doctest.DocTestSuite(trac.ticket.report,)
unittest.makeSuite(ProductReportTestCase,'test'),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/api.py | bloodhound_multiproduct/tests/ticket/api.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's tickets API in product environments"""
import unittest
from trac.perm import PermissionCache, PermissionSystem
from trac.test import Mock
from trac.ticket.api import TicketSystem
from trac.ticket.tests.api import TicketSystemTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductTicketSystemTestCase(TicketSystemTestCase, MultiproductTestCase):
def setUp(self):
self.global_env = self._setup_test_env(create_folder=False)
self._upgrade_mp(self.global_env)
self._setup_test_log(self.global_env)
self._load_product_from_data(self.global_env, self.default_product)
self.env = ProductEnvironment(self.global_env, self.default_product)
self.perm = PermissionSystem(self.env)
self.ticket_system = TicketSystem(self.env)
self.req = Mock()
def tearDown(self):
self.global_env.reset_db()
def test_custom_field_isolation(self):
self.env.config.set('ticket-custom', 'test', 'select')
self.env.config.set('ticket-custom', 'test.label', 'Test')
self.env.config.set('ticket-custom', 'test.value', '1')
self.env.config.set('ticket-custom', 'test.options', 'option1|option2')
self.global_env.config.set('ticket-custom', 'test', 'text')
self.global_env.config.set('ticket-custom', 'test.label', 'Test')
self.global_env.config.set('ticket-custom', 'test.value', 'Foo bar')
self.global_env.config.set('ticket-custom', 'test.format', 'wiki')
product_fields = TicketSystem(self.env).get_custom_fields()
global_fields = TicketSystem(self.global_env).get_custom_fields()
self.assertEqual({'name': 'test', 'type': 'select', 'label': 'Test',
'value': '1', 'options': ['option1', 'option2'],
'order': 0},
product_fields[0])
self.assertEqual({'name': 'test', 'type': 'text', 'label': 'Test',
'value': 'Foo bar', 'order': 0, 'format': 'wiki'},
global_fields[0])
def test_available_actions_isolation(self):
# Grant TICKET_CREATE in product environment ...
self.perm.grant_permission('anonymous', 'TICKET_CREATE')
self.req.perm = PermissionCache(self.env)
self.assertEqual(['leave', 'reopen'],
self._get_actions({'status': 'closed'}))
# ... but no perms in global environment
self.req.perm = PermissionCache(self.global_env)
product_env = self.env
try:
self.env = self.global_env
self.assertEqual(['leave'], self._get_actions({'status': 'closed'}))
finally:
self.env = product_env
def test_suite():
return unittest.TestSuite([
unittest.makeSuite(ProductTicketSystemTestCase,'test'),
])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/query.py | bloodhound_multiproduct/tests/ticket/query.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's ticket queries in product environments"""
import unittest
from trac.ticket.query import Query
from trac.ticket.tests.query import QueryTestCase, QueryLinksTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductQueryTestCase(QueryTestCase, MultiproductTestCase):
@property
def env(self):
env = getattr(self, '_env', None)
if env is None:
self.global_env = self._setup_test_env()
self._upgrade_mp(self.global_env)
self._setup_test_log(self.global_env)
self._load_product_from_data(self.global_env, self.default_product)
self._env = env = ProductEnvironment(
self.global_env, self.default_product)
self._load_default_data(env)
return env
@env.setter
def env(self, value):
pass
def tearDown(self):
self.global_env.reset_db()
self.global_env = self._env = None
def test_all_grouped_by_milestone(self):
query = Query(self.env, order='id', group='milestone')
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
ORDER BY COALESCE(t.milestone,'')='',COALESCE(milestone.completed,0)=0,milestone.completed,COALESCE(milestone.due,0)=0,milestone.due,t.milestone,COALESCE(t.id,0)=0,t.id""")
self.assertEqual([], args)
tickets = query.execute(self.req)
def test_all_grouped_by_milestone_desc(self):
query = Query(self.env, order='id', group='milestone', groupdesc=1)
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.milestone AS milestone,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
LEFT OUTER JOIN milestone ON (milestone.name=milestone)
ORDER BY COALESCE(t.milestone,'')='' DESC,COALESCE(milestone.completed,0)=0 DESC,milestone.completed DESC,COALESCE(milestone.due,0)=0 DESC,milestone.due DESC,t.milestone DESC,COALESCE(t.id,0)=0,t.id""")
self.assertEqual([], args)
tickets = query.execute(self.req)
def test_all_ordered_by_id(self):
query = Query(self.env, order='id')
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
self.assertEqual([], args)
tickets = query.execute(self.req)
def test_all_ordered_by_id_desc(self):
query = Query(self.env, order='id', desc=1)
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0 DESC,t.id DESC""")
self.assertEqual([], args)
tickets = query.execute(self.req)
def test_all_ordered_by_id_from_unicode(self):
query = Query.from_string(self.env, u'order=id')
sql, args = query.get_sql()
self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
self.assertEqual([], args)
tickets = query.execute(self.req)
    def test_all_ordered_by_id_verbose(self):
        """verbose=1 adds reporter and description to the select list."""
        query = Query(self.env, order='id', verbose=1)
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.reporter AS reporter,t.description AS description,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_all_ordered_by_priority(self):
        """With no explicit order, tickets sort by priority.value cast to int."""
        query = Query(self.env)  # priority is default order
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(priority.value,'')='',%(cast_priority)s,t.id""" % {
            'cast_priority': self.env.get_read_db().cast('priority.value', 'int')})
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_all_ordered_by_priority_desc(self):
        """desc=1 reverses the default priority ordering."""
        query = Query(self.env, desc=1)  # priority is default order
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(priority.value,'')='' DESC,%(cast_priority)s DESC,t.id""" % {
            'cast_priority': self.env.get_read_db().cast('priority.value', 'int')})
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_custom_field(self):
        """A custom-field constraint adds a ticket_custom join and WHERE clause."""
        self.env.config.set('ticket-custom', 'foo', 'text')
        query = Query.from_string(self.env, 'foo=something', order='id')
        sql, args = query.get_sql()
        # The custom-field alias must be quoted per the backend's rules.
        foo = self.env.get_read_db().quote('foo')
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value,%s.value AS %s
FROM ticket AS t
  LEFT OUTER JOIN ticket_custom AS %s ON (id=%s.ticket AND %s.name='foo')
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(%s.value,'')=%%s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % ((foo,) * 6))
        self.assertEqual(['something'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_empty_value_contains(self):
        """An empty 'contains' constraint (owner~=|) is dropped from the SQL."""
        query = Query.from_string(self.env, 'owner~=|', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_empty_value_endswith(self):
        """An empty 'endswith' constraint (owner$=|) is dropped from the SQL."""
        query = Query.from_string(self.env, 'owner$=|', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_empty_value_startswith(self):
        """An empty 'startswith' constraint (owner^=|) is dropped from the SQL."""
        query = Query.from_string(self.env, 'owner^=|', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_milestone(self):
        """A milestone constraint becomes a parameterized equality in WHERE."""
        query = Query.from_string(self.env, 'milestone=milestone1', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,t.milestone AS milestone,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.milestone,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['milestone1'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_milestone_or_version(self):
        """Two constraint groups joined with '&or&' produce OR-ed WHERE clauses."""
        query = Query.from_string(self.env, 'milestone=milestone1&or&version=version1', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,t.version AS version,t.milestone AS milestone,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.milestone,'')=%s)) OR ((COALESCE(t.version,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['milestone1', 'version1'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_multiple_owners(self):
        """Multiple '|'-separated values collapse to a single SQL IN clause."""
        query = Query.from_string(self.env, 'owner=someone|someone_else',
                                  order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.owner,'') IN (%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['someone', 'someone_else'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_multiple_owners_contain(self):
        """Multiple 'contains' values become OR-ed LIKE clauses; '_' is escaped."""
        query = Query.from_string(self.env, 'owner~=someone|someone_else',
                                  order='id')
        sql, args = query.get_sql()
        # Note the '/'-escaped underscore in the second LIKE pattern.
        self.assertEqual(['%someone%', '%someone/_else%'], args)
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s OR COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_multiple_owners_not(self):
        """Negated multi-value constraint becomes a single NOT IN clause."""
        query = Query.from_string(self.env, 'owner!=someone|someone_else',
                                  order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.owner,'') NOT IN (%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['someone', 'someone_else'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_owner_beginswith(self):
        """'^=' maps to a LIKE with a trailing wildcard."""
        query = Query.from_string(self.env, 'owner^=someone', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
        self.assertEqual(['someone%'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_owner_containing(self):
        """'~=' maps to a LIKE with wildcards on both ends."""
        query = Query.from_string(self.env, 'owner~=someone', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
        self.assertEqual(['%someone%'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_owner_endswith(self):
        """'$=' maps to a LIKE with a leading wildcard."""
        query = Query.from_string(self.env, 'owner$=someone', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
        self.assertEqual(['%someone'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_owner_not_containing(self):
        """'!~=' maps to a negated LIKE clause."""
        query = Query.from_string(self.env, 'owner!~=someone', order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'') NOT %(like)s))
ORDER BY COALESCE(t.id,0)=0,t.id""" % {'like': self.env.get_read_db().like()})
        self.assertEqual(['%someone%'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_constrained_by_status(self):
        """Multiple status values collapse to a single IN clause."""
        query = Query.from_string(self.env, 'status=new|assigned|reopened',
                                  order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.status AS status,t.owner AS owner,t.type AS type,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.status,'') IN (%s,%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['new', 'assigned', 'reopened'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_equal_in_value(self):
        """A literal '=' inside a constraint value is kept as part of the value."""
        query = Query.from_string(self.env, r'status=this=that&version=version1',
                                  order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.priority AS priority,t.product AS product,t.milestone AS milestone,t.status AS status,t.time AS time,t.changetime AS changetime,t.version AS version,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.status,'')=%s) AND (COALESCE(t.version,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['this=that', 'version1'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_grouped_by_custom_field(self):
        """Grouping by a custom field sorts on the joined custom value first."""
        self.env.config.set('ticket-custom', 'foo', 'text')
        query = Query(self.env, group='foo', order='id')
        sql, args = query.get_sql()
        # The custom-field alias must be quoted per the backend's rules.
        foo = self.env.get_read_db().quote('foo')
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value,%s.value AS %s
FROM ticket AS t
  LEFT OUTER JOIN ticket_custom AS %s ON (id=%s.ticket AND %s.name='foo')
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(%s.value,'')='',%s.value,COALESCE(t.id,0)=0,t.id""" %
            ((foo,) * 7))
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_grouped_by_priority(self):
        """Grouping by priority sorts on the int-cast enum value."""
        query = Query(self.env, group='priority')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.product AS product,t.milestone AS milestone,t.priority AS priority,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
ORDER BY COALESCE(priority.value,'')='',%(cast_priority)s,t.id""" % {
            'cast_priority': self.env.get_read_db().cast('priority.value', 'int')})
        self.assertEqual([], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_special_character_escape(self):
        """Backslash-escaped '&' and '|' are treated as literal characters."""
        query = Query.from_string(self.env, r'status=here\&now|maybe\|later|back\slash',
                                  order='id')
        sql, args = query.get_sql()
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.status AS status,t.owner AS owner,t.type AS type,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE (COALESCE(t.status,'') IN (%s,%s,%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['here&now', 'maybe|later', 'back\\slash'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
    def test_user_var(self):
        """$USER expands to the request's authname when a req is supplied."""
        query = Query.from_string(self.env, 'owner=$USER&order=id')
        sql, args = query.get_sql(req=self.req)
        self.assertEqualSQL(sql,
"""SELECT t.id AS id,t.summary AS summary,t.owner AS owner,t.type AS type,t.status AS status,t.priority AS priority,t.product AS product,t.time AS time,t.changetime AS changetime,priority.value AS priority_value
FROM ticket AS t
  LEFT OUTER JOIN enum AS priority ON (priority.type='priority' AND priority.name=priority)
WHERE ((COALESCE(t.owner,'')=%s))
ORDER BY COALESCE(t.id,0)=0,t.id""")
        self.assertEqual(['anonymous'], args)
        # Smoke test: executing the query must not raise.
        tickets = query.execute(self.req)
class ProductQueryLinksTestCase(QueryLinksTestCase, MultiproductTestCase):
    """Run trac's QueryLinksTestCase inside a product environment."""

    @property
    def env(self):
        # Lazily build the multiproduct test environment on first access;
        # later reads return the cached product environment.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env()
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                self.global_env, self.default_product)
            self._load_default_data(env)
        return env

    @env.setter
    def env(self, value):
        # The inherited setUp assigns self.env; ignore the assignment so
        # the lazy property above remains the single source of truth.
        pass

    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self._env = None
def test_suite():
    """Aggregate the product query test cases into one suite."""
    suite = unittest.TestSuite()
    for case in (ProductQueryTestCase, ProductQueryLinksTestCase):
        suite.addTest(unittest.makeSuite(case, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/model.py | bloodhound_multiproduct/tests/ticket/model.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's tickets model in product environments"""
from datetime import datetime
import shutil
import unittest
from trac.ticket.model import Milestone, Ticket
from trac.ticket.tests.model import TicketTestCase, TicketCommentTestCase, \
TicketCommentEditTestCase, TicketCommentDeleteTestCase, EnumTestCase, \
MilestoneTestCase, ComponentTestCase, VersionTestCase
from trac.util.datefmt import to_utimestamp, utc
from multiproduct.model import Product
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
try:
import threading
except ImportError:
threading = None
from Queue import Queue
class ProductTicketTestCase(TicketTestCase, MultiproductTestCase):
    """Run trac's TicketTestCase in product scope, plus id/uid mapping checks."""

    def setUp(self):
        self._mp_setup()
        self.global_env = self.env
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self._load_default_data(self.env)
        # Custom fields required by the inherited trac test methods.
        self.env.config.set('ticket-custom', 'foo', 'text')
        self.env.config.set('ticket-custom', 'cbon', 'checkbox')
        self.env.config.set('ticket-custom', 'cboff', 'checkbox')

    def tearDown(self):
        self.global_env.reset_db()
        self.env = self.global_env = None

    def _get_ticket_uid(self, tid):
        # Return the global uid behind a product-scoped ticket id,
        # or -1 when no such ticket exists.
        with self.env.db_query as db:
            rows = db("""SELECT uid FROM ticket WHERE id=%s""", (tid, ))
            return rows[0][0] if rows else -1

    def test_insert_into_multiple_products(self):
        """Ticket ids restart per product while uids keep growing globally."""
        # UIDs are global, autoincremented
        # IDs are product-scoped, incremented in the SQL translator
        self.env = ProductEnvironment(self.global_env, self.default_product)
        tid = self._insert_ticket('hello kitty', reporter='admin')
        ticket = Ticket(self.env, tid)
        self.assertEqual(tid, 1)
        self.assertEqual(self._get_ticket_uid(tid), 1)
        self.assertEqual(ticket.id, tid)
        tid = self._insert_ticket('hello kitteh', reporter='admin')
        ticket = Ticket(self.env, tid)
        self.assertEqual(tid, 2)
        self.assertEqual(self._get_ticket_uid(tid), 2)
        self.assertEqual(ticket.id, tid)

        # Switch to a second product: local ids restart at 1,
        # but uids continue from the global sequence.
        p2 = Product(self.global_env)
        p2.prefix = 'p2'
        p2.name = 'product, too'
        p2.owner = 'admin'
        p2.insert()

        self.env = ProductEnvironment(self.global_env, p2)
        tid = self._insert_ticket('hello catty', reporter='admin')
        ticket = Ticket(self.env, tid)
        self.assertEqual(tid, 1)
        self.assertEqual(self._get_ticket_uid(tid), 3)
        self.assertEqual(ticket.id, tid)
        tid = self._insert_ticket('hello ocelot', reporter='admin')
        ticket = Ticket(self.env, tid)
        self.assertEqual(tid, 2)
        self.assertEqual(self._get_ticket_uid(tid), 4)
        self.assertEqual(ticket.id, tid)
class ProductTicketCommentTestCase(MultiproductTestCase):
    """Shared fixture for running trac's ticket-comment tests in a product."""

    @property
    def env(self):
        # Lazily build the multiproduct test environment on first access;
        # later reads return the cached product environment.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env()
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                self.global_env, self.default_product)
            self._load_default_data(env)
        return env

    @env.setter
    def env(self, value):
        # The inherited setUp assigns self.env; ignore the assignment so
        # the lazy property above remains the single source of truth.
        pass

    def tearDown(self):
        self.global_env.reset_db()
        self._env = self.global_env = None
class ProductTicketCommentEditTestCase(TicketCommentEditTestCase,
                                       ProductTicketCommentTestCase):
    # Pure mixin composition: trac's comment-edit tests run against the
    # product-scoped fixture defined in ProductTicketCommentTestCase.
    pass
class ProductTicketCommentDeleteTestCase(TicketCommentDeleteTestCase,
                                         ProductTicketCommentTestCase):
    # Pure mixin composition: trac's comment-delete tests run against the
    # product-scoped fixture defined in ProductTicketCommentTestCase.
    pass
class ProductEnumTestCase(EnumTestCase, MultiproductTestCase):
    """Run trac's enum model tests inside a product environment."""

    def setUp(self):
        self._mp_setup()
        self.global_env = self.env
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self._load_default_data(self.env)

    def tearDown(self):
        self.global_env.reset_db()
        self.env = self.global_env = None
class ProductMilestoneTestCase(MilestoneTestCase, MultiproductTestCase):
    """Run trac's milestone model tests inside a product environment,
    plus product-specific regression tests.
    """

    def setUp(self):
        # create_folder=True: a real directory is needed for this fixture.
        self.global_env = self._setup_test_env(create_folder=True)
        self._upgrade_mp(self.global_env)
        self._setup_test_log(self.global_env)
        self._load_product_from_data(self.global_env, self.default_product)
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self._load_default_data(self.env)

    def tearDown(self):
        shutil.rmtree(self.global_env.path)
        self.global_env.reset_db()
        self.env = self.global_env = None

    @unittest.skipUnless(threading, 'Threading required for test')
    def test_milestone_threads(self):
        """ Ensure that in threaded (e.g. mod_wsgi) situations, we get
        an accurate list of milestones from Milestone.list

        The basic strategy is:
        thread-1 requests a list of milestones
        thread-2 adds a milestone
        thread-1 requests a new list of milestones

        To pass, thread-1 should have a list of milestones that matches
        those that are in the database.
        """
        lock = threading.RLock()
        results = []
        # two events to coordinate the workers and ensure that the threads
        # alternate appropriately
        e1 = threading.Event()
        e2 = threading.Event()

        def task(add):
            """the thread task - either we are discovering or adding events"""
            with lock:
                env = ProductEnvironment(self.global_env,
                                         self.default_product)
                if add:
                    name = 'milestone_from_' + threading.current_thread().name
                    milestone = Milestone(env)
                    milestone.name = name
                    milestone.insert()
                else:
                    # collect the names of milestones reported by Milestone and
                    # directly from the db - as sets to ease comparison later
                    results.append({
                        'from_t': set([m.name for m in Milestone.select(env)]),
                        'from_db': set(
                            [v[0] for v in self.env.db_query(
                                "SELECT name FROM milestone")])})

        def worker1():
            """ check milestones in this thread twice either side of ceding
            control to worker2
            """
            task(False)
            e1.set()
            e2.wait()
            task(False)

        def worker2():
            """ adds a milestone when worker1 allows us to then cede control
            back to worker1
            """
            e1.wait()
            task(True)
            e2.set()

        t1, t2 = [threading.Thread(target=f) for f in (worker1, worker2)]
        t1.start()
        t2.start()
        t1.join()
        t2.join()

        r = results[-1]  # note we only care about the final result
        self.assertEqual(r['from_t'], r['from_db'])

    def test_update_milestone(self):
        """A milestone update persists due/completed/description and the
        product prefix in the milestone row."""
        self.env.db_transaction("INSERT INTO milestone (name) VALUES ('Test')")
        milestone = Milestone(self.env, 'Test')
        # Plain decimal literals: the original 01/02-style literals are
        # octal syntax -- deprecated in Python 2 and a SyntaxError in
        # Python 3 (PEP 3127).
        t1 = datetime(2001, 1, 1, tzinfo=utc)
        t2 = datetime(2002, 2, 2, tzinfo=utc)
        milestone.due = t1
        milestone.completed = t2
        milestone.description = 'Foo bar'
        milestone.update()

        self.assertEqual(
            [('Test', to_utimestamp(t1), to_utimestamp(t2), 'Foo bar',
              self.default_product)],
            self.env.db_query("SELECT * FROM milestone WHERE name='Test'"))
class ProductComponentTestCase(ComponentTestCase, MultiproductTestCase):
    """Run trac's component model tests inside a product environment."""

    def setUp(self):
        self._mp_setup()
        self.global_env = self.env
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self._load_default_data(self.env)

    def tearDown(self):
        self.global_env.reset_db()
        self.env = self.global_env = None
class ProductVersionTestCase(VersionTestCase, MultiproductTestCase):
    """Run trac's version model tests inside a product environment."""

    def setUp(self):
        self._mp_setup()
        self.global_env = self.env
        self.env = ProductEnvironment(self.global_env, self.default_product)
        self._load_default_data(self.env)

    def tearDown(self):
        self.global_env.reset_db()
        self.env = self.global_env = None
def test_suite():
    """Collect every multiproduct ticket-model test case into one suite."""
    cases = (
        ProductTicketTestCase,
        ProductTicketCommentEditTestCase,
        ProductTicketCommentDeleteTestCase,
        ProductEnumTestCase,
        ProductMilestoneTestCase,
        ProductComponentTestCase,
        ProductVersionTestCase,
    )
    suite = unittest.TestSuite()
    for case in cases:
        suite.addTest(unittest.makeSuite(case, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/conversion.py | bloodhound_multiproduct/tests/ticket/conversion.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's MIME conversions for tickets in
product environments"""
import os.path
import unittest
from trac.test import Mock
from trac.mimeview.api import Mimeview
from trac.ticket.tests.conversion import TicketConversionTestCase
from trac.web.href import Href
from multiproduct.ticket.web_ui import ProductTicketModule
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductTicketConversionTestCase(TicketConversionTestCase,
                                      MultiproductTestCase):
    """Run trac's ticket MIME conversion tests inside a product environment,
    overriding CSV/TSV expectations to include the 'product' column.
    """
    # Note: the redundant backslash line-continuation inside the class
    # header parentheses was removed; parentheses already continue lines.

    def setUp(self):
        self._mp_setup()
        self.global_env = self.env
        self.env = ProductEnvironment(self.global_env, self.default_product)
        # Product name inserted in RSS feed
        self.env.product._data['name'] = 'My Project'

        self.env.config.set('trac', 'templates_dir',
                            os.path.join(os.path.dirname(self.env.path),
                                         'templates'))
        self.ticket_module = ProductTicketModule(self.env)
        self.mimeview = Mimeview(self.env)
        self.req = Mock(base_path='/trac.cgi', path_info='',
                        href=Href('/trac.cgi'), chrome={'logo': {}},
                        abs_href=Href('http://example.org/trac.cgi'),
                        environ={}, perm=[], authname='-', args={}, tz=None,
                        locale='', session=None, form_token=None)

    def test_csv_conversion(self):
        """CSV export carries a UTF-8 BOM and the extra 'product' column."""
        ticket = self._create_a_ticket()
        csv = self.mimeview.convert_content(self.req, 'trac.ticket.Ticket',
                                            ticket, 'csv')
        self.assertEqual(('\xef\xbb\xbf'
                          'id,summary,reporter,owner,description,status,'
                          'product,keywords,cc\r'
                          '\n1,Foo,santa,,Bar,,,,\r\n',
                          'text/csv;charset=utf-8', 'csv'), csv)

    def test_tab_conversion(self):
        """TSV export carries a UTF-8 BOM and the extra 'product' column."""
        ticket = self._create_a_ticket()
        csv = self.mimeview.convert_content(self.req, 'trac.ticket.Ticket',
                                            ticket, 'tab')
        self.assertEqual(('\xef\xbb\xbf'
                          'id\tsummary\treporter\towner\tdescription\tstatus\t'
                          'product\tkeywords\tcc\r\n'
                          '1\tFoo\tsanta\t\tBar\t\t\t\t\r\n',
                          'text/tab-separated-values;charset=utf-8', 'tsv'),
                         csv)

    def tearDown(self):
        self.global_env.reset_db()
        # Drop environment references for consistency with the other
        # multiproduct test modules.
        self.env = self.global_env = None
def test_suite():
    """Build the suite for the product ticket MIME conversion tests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductTicketConversionTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/__init__.py | bloodhound_multiproduct/tests/ticket/__init__.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests import TestLoader
def test_suite():
    """Discover every test module in this package as a single suite."""
    return TestLoader().discover_package(__package__, pattern='*.py')

if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/notification.py | bloodhound_multiproduct/tests/ticket/notification.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's tickets notifications
in product environments"""
import unittest
from trac.tests.notification import SMTPThreadedServer
from trac.ticket.tests import notification
from tests.env import ProductEnvironmentStub as ProductEnvironment
from tests.env import MultiproductTestCase
class ProductNotificationTestCase(notification.NotificationTestCase,
                                  MultiproductTestCase):
    """Run trac's ticket notification tests inside a product environment."""

    @property
    def env(self):
        # Lazily build the multiproduct test environment on first access;
        # later reads return the cached product environment.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env()
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                self.global_env, self.default_product)
            self._load_default_data(self._env)
        return env

    @env.setter
    def env(self, value):
        # The inherited setUp assigns self.env; ignore the assignment so
        # the lazy property above remains the single source of truth.
        pass

    def tearDown(self):
        # Let the shared notification suite stop/reset its SMTP fixture.
        notification.notifysuite.tear_down()
        self.global_env.reset_db()
class ProductNotificationTestSuite(notification.NotificationTestSuite):
    """Notification suite that swaps trac's cases for the product ones."""

    def __init__(self):
        """Start the local SMTP test server"""
        notification.NotificationTestSuite.__init__(self)
        # Replace the tests the base class registered with the
        # product-scoped variants.
        self._tests = []
        self.addTest(unittest.makeSuite(ProductNotificationTestCase, 'test'))
def test_suite():
    # Reuse the module-level singleton so the SMTP test server is only
    # started once across repeated invocations.
    if not notification.notifysuite:
        notification.notifysuite = ProductNotificationTestSuite()
    return notification.notifysuite

if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(test_suite())
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/batch.py | bloodhound_multiproduct/tests/ticket/batch.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's tickets batch updates
in product environments"""
import unittest
from trac.perm import PermissionCache
from trac.test import Mock
from trac.ticket.batch import BatchModifyModule
from trac.ticket.tests.batch import BatchModifyTestCase
from trac.ticket.default_workflow import ConfigurableTicketWorkflow
from trac.util.datefmt import utc
from multiproduct.env import ProductEnvironment
from multiproduct.ticket.web_ui import ProductTicketModule
from tests.env import MultiproductTestCase
class ProductBatchModifyTestCase(BatchModifyTestCase, MultiproductTestCase):
    """Run Trac's ticket batch-modify tests inside a product environment."""

    def setUp(self):
        """Create a product environment and a mock request for the tests."""
        self.global_env = self._setup_test_env(create_folder=False)
        self._upgrade_mp(self.global_env)
        self._setup_test_log(self.global_env)
        self._load_product_from_data(self.global_env, self.default_product)
        self.env = ProductEnvironment(self.global_env, self.default_product)
        # The inherited tests need these components enabled explicitly in
        # the product configuration.
        for component in (ConfigurableTicketWorkflow, ProductTicketModule):
            self.global_env.enable_component_in_config(self.env, component)
        self._load_default_data(self.env)
        self.req = Mock(href=self.env.href, authname='anonymous', tz=utc)
        self.req.session = {}
        self.req.perm = PermissionCache(self.env)

    def tearDown(self):
        self.global_env.reset_db()
def test_suite():
    """Build the suite of product batch-modify tests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductBatchModifyTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/ticket/roadmap.py | bloodhound_multiproduct/tests/ticket/roadmap.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's roadmap in product environments"""
import unittest
from trac.ticket.tests.roadmap import DefaultTicketGroupStatsProviderTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductDefaultTicketGroupStatsProviderTestCase(
    DefaultTicketGroupStatsProviderTestCase, MultiproductTestCase):
    """Run Trac's ticket group stats provider tests in a product env."""

    @property
    def env(self):
        """Product environment wrapping a fresh global env, created lazily."""
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env(create_folder=False)
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                self.global_env, self.default_product)
            self._load_default_data(env)
        return env

    @env.setter
    def env(self, value):
        # Ignore assignments from the inherited setUp(); the property above
        # owns environment creation.
        pass

    def tearDown(self):
        self.global_env.reset_db()
        # Fix: the previous `self.env = None` went through the no-op setter
        # and left `_env` populated; clear the backing attribute directly,
        # as the sibling product test cases do.
        self.global_env = self._env = None
def test_suite():
    """Assemble the product roadmap stats provider suite."""
    tests = unittest.makeSuite(
        ProductDefaultTicketGroupStatsProviderTestCase, 'test')
    return unittest.TestSuite([tests])


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/versioncontrol/svn_authz.py | bloodhound_multiproduct/tests/versioncontrol/svn_authz.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's SVN authz policy in product environments"""
import os
import unittest
from trac.versioncontrol.svn_authz import AuthzSourcePolicy
from trac.versioncontrol.tests.svn_authz import AuthzSourcePolicyTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductAuthzSourcePolicyTestCase(AuthzSourcePolicyTestCase,
                                       MultiproductTestCase):
    """Run Trac's AuthzSourcePolicy tests against a product environment."""

    @property
    def env(self):
        """Product environment, created on first access."""
        if getattr(self, '_env', None) is None:
            self.global_env = self._setup_test_env(enable=[AuthzSourcePolicy])
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = ProductEnvironment(self.global_env,
                                           self.default_product)
        return self._env

    @env.setter
    def env(self, value):
        # Swallow assignments from the inherited setUp(); the property
        # above owns environment creation.
        pass

    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self._env = None
        os.remove(self.authz)
def test_suite():
    """Suite of product-scoped SVN authz policy tests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductAuthzSourcePolicyTestCase,
                                     'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/versioncontrol/api.py | bloodhound_multiproduct/tests/versioncontrol/api.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's repository API in product environments"""
import unittest
from trac.resource import Resource, get_resource_description, get_resource_url
from trac.versioncontrol.api import Repository
from trac.versioncontrol.tests.api import ResourceManagerTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductResourceManagerTestCase(ResourceManagerTestCase,
                                     MultiproductTestCase):
    """Run Trac's version-control resource manager tests in a product env.

    The URL tests are overridden because product-scoped resource URLs gain
    a `/products/<prefix>` path segment.
    """
    @property
    def env(self):
        # Lazily create a product environment backed by a fresh global env.
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env()
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(
                self.global_env, self.default_product)
            self._load_default_data(env)
        return env
    @env.setter
    def env(self, value):
        # Ignore assignments from the inherited setUp(); the property above
        # owns environment creation.
        pass
    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self._env = None
    def test_resource_changeset(self):
        """Changeset resource URLs carry the product prefix."""
        res = Resource('changeset', '42')
        self.assertEqual('Changeset 42', get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/changeset/42',
                         get_resource_url(self.env, res, self.env.href))
        repo = Resource('repository', 'repo')
        res = Resource('changeset', '42', parent=repo)
        self.assertEqual('Changeset 42 in repo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/changeset/42/repo',
                         get_resource_url(self.env, res, self.env.href))
    def test_resource_source(self):
        """Source/browser resource URLs carry the product prefix."""
        res = Resource('source', '/trunk/src')
        self.assertEqual('path /trunk/src',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/trunk/src',
                         get_resource_url(self.env, res, self.env.href))
        repo = Resource('repository', 'repo')
        res = Resource('source', '/trunk/src', parent=repo)
        self.assertEqual('path /trunk/src in repo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/repo/trunk/src',
                         get_resource_url(self.env, res, self.env.href))
        repo = Resource('repository', 'repo')
        res = Resource('source', '/trunk/src', version=42, parent=repo)
        self.assertEqual('path /trunk/src@42 in repo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/repo/trunk/src?rev=42',
                         get_resource_url(self.env, res, self.env.href))
    def test_resource_repository(self):
        """Repository resource URLs carry the product prefix."""
        res = Resource('repository', 'testrepo')
        self.assertEqual('Repository testrepo',
                         get_resource_description(self.env, res))
        self.assertEqual('/trac.cgi/products/tp1/browser/testrepo',
                         get_resource_url(self.env, res, self.env.href))
def test_suite():
    """Suite of product resource manager tests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductResourceManagerTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/versioncontrol/__init__.py | bloodhound_multiproduct/tests/versioncontrol/__init__.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests import TestLoader
def test_suite():
    """Discover every test module in this package."""
    loader = TestLoader()
    return loader.discover_package(__package__, pattern='*.py')


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/versioncontrol/cache.py | bloodhound_multiproduct/tests/versioncontrol/cache.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's repository cache in
product environments"""
import unittest
from trac.versioncontrol.tests.cache import CacheTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductCacheTestCase(CacheTestCase, MultiproductTestCase):
    """Run Trac's repository cache tests inside a product environment."""

    @property
    def env(self):
        """Product environment, created lazily on first access."""
        if getattr(self, '_env', None) is None:
            self.global_env = self._setup_test_env()
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = ProductEnvironment(self.global_env,
                                           self.default_product)
        return self._env

    @env.setter
    def env(self, value):
        # Ignore assignments from the inherited setUp(); the property
        # above owns environment creation.
        pass

    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self._env = None
def test_suite():
    """Suite of product repository cache tests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductCacheTestCase, 'test'))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/admin/console.py | bloodhound_multiproduct/tests/admin/console.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's admin console in product environments"""
import os.path
import sys
import unittest
from trac.admin.tests.console import TracadminTestCase, load_expected_results
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductTracAdminTestCase(TracadminTestCase, MultiproductTestCase):
    """Run trac-admin console tests against a product environment."""

    # Expected console output per test, keyed by test method name, loaded
    # from the recorded transcript alongside this module.
    expected_results = load_expected_results(
        os.path.join(os.path.split(__file__)[0], 'console-tests.txt'),
        '===== (test_[^ ]+) =====')

    @property
    def env(self):
        """Product environment, created lazily on first access."""
        env = getattr(self, '_env', None)
        if env is None:
            self.global_env = self._setup_test_env(
                enable=('trac.*', 'multiproduct.*'),
                disable=('trac.tests.*',),
            )
            self._upgrade_mp(self.global_env)
            self._setup_test_log(self.global_env)
            self._load_product_from_data(self.global_env, self.default_product)
            self._env = env = ProductEnvironment(self.global_env,
                                                 self.default_product)
            self._load_default_data(env)
        return env

    @env.setter
    def env(self, value):
        # Ignore assignments from the inherited setUp(); the property
        # above owns environment creation.
        pass

    def tearDown(self):
        self.global_env.reset_db()
        self.global_env = self._env = None

    def test_product_help_ok(self):
        """`product admin <prefix> help` succeeds and matches the recording."""
        self._admin.env_set('', self.global_env)
        from trac import __version__
        # Use the unittest-provided test name instead of the CPython-only
        # sys._getframe() introspection, consistent with the other tests.
        expected_results = self.expected_results[self._testMethodName] \
                           % {'version': __version__}
        rv, output = self._execute('product admin %s help'
                                   % self.default_product)
        self.assertEqual(0, rv)
        self.assertEqual(expected_results, output)

    def test_product_help_version(self):
        """`help version` output matches the recorded transcript."""
        rv, output = self._execute('help version')
        self.assertEqual(0, rv)
        expected = self.expected_results[self._testMethodName]
        self.assertEqual(expected, output)

    def test_product_help_version_add(self):
        """`help version add` output matches the recorded transcript."""
        rv, output = self._execute('help version add')
        self.assertEqual(0, rv)
        expected = self.expected_results[self._testMethodName]
        self.assertEqual(expected, output)

    def test_product_fail_version_add(self):
        """`version add` with too many arguments fails with usage output."""
        rv, output = self._execute('version add v x y')
        self.assertEqual(2, rv)
        expected = self.expected_results[self._testMethodName]
        self.assertEqual(expected, output)
def test_suite():
    """Suite of product trac-admin console tests."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductTracAdminTestCase))
    return suite


if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/admin/product_admin.py | bloodhound_multiproduct/tests/admin/product_admin.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's product admin"""
import sys
import unittest
from wsgiref.util import setup_testing_defaults
from trac.admin.api import IAdminPanelProvider
from trac.admin.web_ui import AdminModule, PluginAdminPanel
from trac.core import Component, implements
from trac.perm import DefaultPermissionPolicy, DefaultPermissionStore, \
PermissionCache, PermissionSystem
from trac.tests.perm import TestPermissionRequestor
from trac.web.api import HTTP_STATUS, HTTPForbidden, HTTPNotFound, \
IRequestFilter, RequestDone, Request
from trac.web.main import RequestDispatcher
from multiproduct import api, product_admin
from multiproduct.env import ProductEnvironment
from multiproduct.product_admin import IProductAdminAclContributor, \
ProductAdminModule
from tests.env import MultiproductTestCase
class TestAdminHandledException(Exception):
    """Raised by TestAdminPanel.post_process_request to capture the state of
    a successfully rendered admin page so tests can assert on it.
    """
    # Product prefix of the environment that handled the request
    # ('' for the global environment).
    product = None
    # Active admin category and panel identifiers.
    category = None
    page = None
    # Trailing path handed to render_admin_panel().
    path_info = None
    # Admin panels visible in the rendered page.
    admin_panels = None
class TestAdminPanel(Component):
    """Fixture component: declares a grid of admin panels and, as a request
    filter, smuggles the rendered admin page's state out of the dispatcher
    by raising TestAdminHandledException.
    """
    implements(IAdminPanelProvider, IRequestFilter)
    # IAdminPanelProvider methods
    def get_admin_panels(self, req):
        # Panels are only advertised to TRAC_ADMIN; the product whitelist /
        # blacklist machinery under test filters them further.
        if 'TRAC_ADMIN' in req.perm:
            yield 'testcat1', 'Test category 1', 'panel1', 'Test panel 1'
            yield 'testcat1', 'Test category 1', 'panel2', 'Test panel 2'
            yield 'testcat1', 'Test category 1', 'panel3', 'Test panel 3'
            yield 'testcat2', 'Test category 2', 'panel1', 'Test panel 1'
            yield 'testcat2', 'Test category 2', 'panel_2', 'Test panel 2'
            yield 'testcat2', 'Test category 2', 'panel-3', 'Test panel 3'
            yield 'testcat3', 'Test category 3', 'panel1', 'Test panel 1'
            yield 'testcat3', 'Test category 3', 'panel2', 'Test panel 2'
    def render_admin_panel(self, req, category, page, path_info):
        req.perm.require('TRAC_ADMIN')
        return 'test.html', {'path_info' : path_info}
    def pre_process_request(self, req, handler):
        return handler
    def post_process_request(self, req, template, data, content_type):
        # Only hijack the response when no exception is already in flight;
        # otherwise pass the rendering through untouched.
        if sys.exc_info() == (None, None, None):
            exc = TestAdminHandledException()
            exc.product = self.env.product.prefix \
                          if isinstance(self.env, ProductEnvironment) \
                          else ''
            exc.category = data.get('active_cat')
            exc.page = data.get('active_panel')
            exc.path_info = data.get('path_info')
            exc.admin_panels = data.get('panels')
            raise exc
        else:
            return template, data, content_type
class PanelsWhitelist(Component):
    """Whitelists a fixed set of individual admin panels for product admins."""

    implements(product_admin.IProductAdminAclContributor)

    # IProductAdminAclContributor methods
    def enable_product_admin_panels(self):
        """Yield the (category, panel) pairs enabled in product contexts."""
        enabled = (
            ('testcat1', 'panel1'),
            ('testcat1', 'panel3'),
            ('testcat2', 'panel3'),
            ('general', 'plugin'),
        )
        for category, panel in enabled:
            yield category, panel
class SectionWhitelist(Component):
    """Whitelists a whole admin category via the '*' wildcard."""

    implements(product_admin.IProductAdminAclContributor)

    # IProductAdminAclContributor methods
    def enable_product_admin_panels(self):
        """Enable every panel under 'testcat3'."""
        yield ('testcat3', '*')
class BaseProductAdminPanelTestCase(MultiproductTestCase):
    """Common fixture: a global env plus a product env, each paired with its
    own ProductAdminModule instance.
    """

    def setUp(self):
        self._mp_setup(enable=[AdminModule, DefaultPermissionPolicy,
                               DefaultPermissionStore, PermissionSystem,
                               PluginAdminPanel, RequestDispatcher,
                               api.MultiProductSystem,
                               product_admin.ProductAdminModule,
                               PanelsWhitelist, SectionWhitelist,
                               TestAdminPanel, TestPermissionRequestor])
        self.global_env = self.env
        self.env = ProductEnvironment(self.global_env, self.default_product)
        # Use the module-level ProductAdminModule import directly; the
        # previous local alias merely shadowed it.
        self.global_product_admin = ProductAdminModule(self.global_env)
        self.product_admin = ProductAdminModule(self.env)

    def tearDown(self):
        self.global_env.reset_db()
        self.env = self.global_env = None
        self.product_admin = self.global_product_admin = None
class ProductAdminSetupTestCase(BaseProductAdminPanelTestCase):
    """Checks how whitelist/blacklist rules build the per-env panel ACL."""
    # NOTE(review): 'panel_1' / 'panel-2' below do not match the ids declared
    # by TestAdminPanel ('panel1' / 'panel_2'); the assertions still hold
    # because unknown panels are denied too — confirm whether intentional.
    ALL_PANELS = [('testcat1', 'panel1'), ('testcat1', 'panel2'),
                  ('testcat1', 'panel3'), ('testcat2', 'panel_1'),
                  ('testcat2', 'panel-2'), ('testcat2', 'panel3'),
                  ('testcat3', 'panel1'), ('testcat3', 'panel2'),
                  ('general', 'plugin'), ]
    def test_init_whitelist(self):
        """Whitelist contributors alone populate the product ACL; the global
        env keeps an empty ACL and denies everything."""
        self.assertEqual({}, self.global_product_admin.acl)
        self.assertEqual({'testcat3' : True,
                          ('testcat1', 'panel1') : True,
                          ('testcat1', 'panel3'): True,
                          ('testcat2', 'panel3'): True,
                          ('general', 'plugin') : True,},
                         self.product_admin.acl)
        self.assertTrue(all(not self.global_product_admin._check_panel(c, p)
                            for c, p in self.ALL_PANELS))
        self.assertTrue(self.product_admin._check_panel('testcat1', 'panel1'))
        self.assertFalse(self.product_admin._check_panel('testcat1', 'panel2'))
        self.assertTrue(self.product_admin._check_panel('testcat1', 'panel3'))
        self.assertFalse(self.product_admin._check_panel('testcat2', 'panel_1'))
        self.assertFalse(self.product_admin._check_panel('testcat2', 'panel-2'))
        self.assertTrue(self.product_admin._check_panel('testcat2', 'panel3'))
        self.assertTrue(self.product_admin._check_panel('testcat3', 'panel1'))
        self.assertTrue(self.product_admin._check_panel('testcat3', 'panel2'))
        self.assertFalse(self.product_admin._check_panel('general', 'plugin'))
        self.assertFalse(self.product_admin._check_panel('other', 'panel'))
    def test_init_blacklist(self):
        """Blacklist entries from [multiproduct] admin_blacklist override the
        whitelist, both per-panel and per-category ('*')."""
        self.global_env.config.set('multiproduct', 'admin_blacklist',
                                   'testcat1:panel1,testcat3:panel2')
        self.env.config.set('multiproduct', 'admin_blacklist',
                            'testcat1:panel3,testcat3:panel1,testcat2:*')
        self.assertEqual(['testcat1:panel1','testcat3:panel2'],
                         self.global_product_admin.raw_blacklist)
        self.assertEqual(['testcat1:panel3','testcat3:panel1','testcat2:*'],
                         self.product_admin.raw_blacklist)
        self.assertEqual({}, self.global_product_admin.acl)
        self.assertEqual({'testcat3' : True,
                          'testcat2' : False,
                          ('testcat1', 'panel1') : True,
                          ('testcat1', 'panel3'): False,
                          ('testcat2', 'panel3'): True,
                          ('testcat3', 'panel1'): False,
                          ('general', 'plugin'): True,},
                         self.product_admin.acl)
        self.assertTrue(all(not self.global_product_admin._check_panel(c, p)
                            for c, p in self.ALL_PANELS))
        self.assertTrue(self.product_admin._check_panel('testcat1', 'panel1'))
        self.assertFalse(self.product_admin._check_panel('testcat1', 'panel2'))
        self.assertFalse(self.product_admin._check_panel('testcat1', 'panel3'))
        self.assertFalse(self.product_admin._check_panel('testcat2', 'panel_1'))
        self.assertFalse(self.product_admin._check_panel('testcat2', 'panel-2'))
        self.assertFalse(self.product_admin._check_panel('testcat2', 'panel3'))
        self.assertFalse(self.product_admin._check_panel('testcat3', 'panel1'))
        self.assertTrue(self.product_admin._check_panel('testcat3', 'panel2'))
        self.assertFalse(self.product_admin._check_panel('general', 'plugin'))
        self.assertFalse(self.product_admin._check_panel('other', 'panel'))
class ProductAdminDispatchTestCase(BaseProductAdminPanelTestCase):
maxDiff = None
    def setUp(self):
        """Extend the base fixture with blacklist config, permissions for
        three personas (adminuser/prodadmin/testuser), and a WSGI request."""
        BaseProductAdminPanelTestCase.setUp(self)
        self.global_env.config.set('multiproduct', 'admin_blacklist',
                                   'testcat1:panel1,testcat3:panel2')
        self.env.config.set('multiproduct', 'admin_blacklist',
                            'testcat1:panel3,testcat3:panel1,testcat2:*')
        global_permsys = PermissionSystem(self.global_env)
        permsys = PermissionSystem(self.env)
        # adminuser: TRAC_ADMIN globally; prodadmin: PRODUCT_ADMIN in both
        # scopes; testuser: only the unrelated TEST_ADMIN action.
        global_permsys.grant_permission('adminuser', 'TRAC_ADMIN')
        global_permsys.grant_permission('prodadmin', 'PRODUCT_ADMIN')
        global_permsys.grant_permission('testuser', 'TEST_ADMIN')
        permsys.grant_permission('prodadmin', 'PRODUCT_ADMIN')
        permsys.grant_permission('testuser', 'TEST_ADMIN')
        self.req = self._get_request_obj()
    def tearDown(self):
        """Release the request on top of the base class teardown."""
        BaseProductAdminPanelTestCase.tearDown(self)
        self.req = None
    def _get_request_obj(self):
        """Build a minimal Trac Request from WSGI testing defaults."""
        environ = {}
        setup_testing_defaults(environ)
        def start_response(status, headers):
            # No-op WSGI start_response; the body writer is discarded.
            return lambda body: None
        req = Request(environ, start_response)
        return req
    def _dispatch(self, req, env):
        """Attach a permission cache for req.authname and dispatch through
        the given environment's RequestDispatcher."""
        req.perm = PermissionCache(env, req.authname)
        return RequestDispatcher(env).dispatch(req)
GLOBAL_PANELS = [
{'category': {'id': 'general', 'label': 'General'},
'panel': {'id': 'plugin', 'label': 'Plugins'}},
{'category': {'id': 'testcat1', 'label': 'Test category 1'},
'panel': {'id': 'panel1', 'label': 'Test panel 1'}},
{'category': {'id': 'testcat1', 'label': 'Test category 1'},
'panel': {'id': 'panel2', 'label': 'Test panel 2'}},
{'category': {'id': 'testcat1', 'label': 'Test category 1'},
'panel': {'id': 'panel3', 'label': 'Test panel 3'}},
{'category': {'id': 'testcat2', 'label': 'Test category 2'},
'panel': {'id': 'panel-3', 'label': 'Test panel 3'}},
{'category': {'id': 'testcat2', 'label': 'Test category 2'},
'panel': {'id': 'panel1', 'label': 'Test panel 1'}},
{'category': {'id': 'testcat2', 'label': 'Test category 2'},
'panel': {'id': 'panel_2', 'label': 'Test panel 2'}},
{'category': {'id': 'testcat3', 'label': 'Test category 3'},
'panel': {'id': 'panel1', 'label': 'Test panel 1'}},
{'category': {'id': 'testcat3', 'label': 'Test category 3'},
'panel': {'id': 'panel2', 'label': 'Test panel 2'}}]
PRODUCT_PANELS_ALL = [
{'category': {'id': 'testcat1', 'label': 'Test category 1'},
'panel': {'id': 'panel1', 'label': 'Test panel 1'}},
{'category': {'id': 'testcat1', 'label': 'Test category 1'},
'panel': {'id': 'panel2', 'label': 'Test panel 2'}},
{'category': {'id': 'testcat1', 'label': 'Test category 1'},
'panel': {'id': 'panel3', 'label': 'Test panel 3'}},
{'category': {'id': 'testcat2', 'label': 'Test category 2'},
'panel': {'id': 'panel-3', 'label': 'Test panel 3'}},
{'category': {'id': 'testcat2', 'label': 'Test category 2'},
'panel': {'id': 'panel1', 'label': 'Test panel 1'}},
{'category': {'id': 'testcat2', 'label': 'Test category 2'},
'panel': {'id': 'panel_2', 'label': 'Test panel 2'}},
{'category': {'id': 'testcat3', 'label': 'Test category 3'},
'panel': {'id': 'panel1', 'label': 'Test panel 1'}},
{'category': {'id': 'testcat3', 'label': 'Test category 3'},
'panel': {'id': 'panel2', 'label': 'Test panel 2'}}]
PRODUCT_PANELS_ALLOWED = [
{'category': {'id': 'testcat1', 'label': 'Test category 1'},
'panel': {'id': 'panel1', 'label': 'Test panel 1'}},
{'category': {'id': 'testcat3', 'label': 'Test category 3'},
'panel': {'id': 'panel2', 'label': 'Test panel 2'}}]
    # TRAC_ADMIN: sees every panel regardless of whitelist/blacklist, but
    # the plugin panel is unavailable in product context.
    def test_tracadmin_global_panel(self):
        """Test admin panel with TRAC_ADMIN in global env
        """
        req = self.req
        req.authname = 'adminuser'
        req.environ['PATH_INFO'] = '/admin/testcat1/panel1/some/path'
        with self.assertRaises(TestAdminHandledException) as test_cm:
            self._dispatch(req, self.global_env)
        exc = test_cm.exception
        self.assertEqual('', exc.product)
        self.assertEqual('testcat1', exc.category)
        self.assertEqual('panel1', exc.page)
        self.assertEqual('some/path', exc.path_info)
        self.assertEqual(self.GLOBAL_PANELS, exc.admin_panels)
    def test_tracadmin_global_plugins(self):
        """Plugin admin panel with TRAC_ADMIN in global env
        """
        req = self.req
        req.authname = 'adminuser'
        req.environ['PATH_INFO'] = '/admin/general/plugin'
        # Plugin admin panel looked up but disabled
        with self.assertRaises(TestAdminHandledException) as test_cm:
            self._dispatch(req, self.global_env)
        exc = test_cm.exception
        self.assertEqual(self.GLOBAL_PANELS, exc.admin_panels)
    def test_tracadmin_product_panel_blacklist(self):
        """Test blacklisted admin panel with TRAC_ADMIN in product env
        """
        req = self.req
        req.authname = 'adminuser'
        req.environ['PATH_INFO'] = '/admin/testcat3/panel1/some/path'
        with self.assertRaises(TestAdminHandledException) as test_cm:
            self._dispatch(req, self.env)
        exc = test_cm.exception
        self.assertEqual(self.default_product, exc.product)
        self.assertEqual('testcat3', exc.category)
        self.assertEqual('panel1', exc.page)
        self.assertEqual('some/path', exc.path_info)
        self.assertEqual(self.PRODUCT_PANELS_ALL, exc.admin_panels)
    def test_tracadmin_product_panel_whitelist(self):
        """Test whitelisted admin panel with TRAC_ADMIN in product env
        """
        req = self.req
        req.authname = 'adminuser'
        req.environ['PATH_INFO'] = '/admin/testcat1/panel1/some/path'
        with self.assertRaises(TestAdminHandledException) as test_cm:
            self._dispatch(req, self.env)
        exc = test_cm.exception
        self.assertEqual(self.default_product, exc.product)
        self.assertEqual('testcat1', exc.category)
        self.assertEqual('panel1', exc.page)
        self.assertEqual('some/path', exc.path_info)
        self.assertEqual(self.PRODUCT_PANELS_ALL, exc.admin_panels)
    def test_tracadmin_product_plugins(self):
        """Plugin admin panel with TRAC_ADMIN in product env
        """
        req = self.req
        req.authname = 'adminuser'
        req.environ['PATH_INFO'] = '/admin/general/plugin'
        # Plugin admin panel not available in product context
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.env)
    # PRODUCT_ADMIN: only whitelisted, non-blacklisted product panels are
    # reachable; nothing is reachable in the global environment.
    def test_productadmin_global_panel_whitelist(self):
        """Test whitelisted admin panel with PRODUCT_ADMIN in global env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/testcat1/panel1/some/path'
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.global_env)
    def test_productadmin_global_panel_blacklist(self):
        """Test blacklisted admin panel with PRODUCT_ADMIN in global env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/testcat3/panel1/some/path'
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.global_env)
    def test_productadmin_global_panel_norules(self):
        """Test unspecified admin panel with PRODUCT_ADMIN in global env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/testcat1/panel2/some/path'
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.global_env)
    def test_productadmin_global_plugins(self):
        """Plugin admin panel with PRODUCT_ADMIN in global env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/general/plugin'
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.global_env)
    def test_productadmin_product_panel_whitelist(self):
        """Test whitelisted admin panel with PRODUCT_ADMIN in product env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/testcat1/panel1/some/path'
        with self.assertRaises(TestAdminHandledException) as test_cm:
            self._dispatch(req, self.env)
        exc = test_cm.exception
        self.assertEqual(self.default_product, exc.product)
        self.assertEqual('testcat1', exc.category)
        self.assertEqual('panel1', exc.page)
        self.assertEqual('some/path', exc.path_info)
        self.assertEqual(self.PRODUCT_PANELS_ALLOWED, exc.admin_panels)
    def test_productadmin_product_panel_blacklist(self):
        """Test blacklisted admin panel with PRODUCT_ADMIN in product env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/testcat3/panel1/some/path'
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.env)
    def test_productadmin_product_panel_norules(self):
        """Test unspecified admin panel with PRODUCT_ADMIN in product env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/testcat1/panel2/some/path'
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.env)
    def test_productadmin_product_plugins(self):
        """Plugin admin panel with PRODUCT_ADMIN in product env
        """
        req = self.req
        req.authname = 'prodadmin'
        req.environ['PATH_INFO'] = '/admin/general/plugin'
        with self.assertRaises(HTTPNotFound):
            self._dispatch(req, self.env)
# Without meta-permissions
def test_user_global_panel_whitelist(self):
"""Test whitelisted admin panel without meta-perm in product env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/testcat1/panel1/some/path'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.global_env)
def test_user_global_panel_blacklist(self):
"""Test blacklisted admin panel without meta-perm in product env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/testcat3/panel1/some/path'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.global_env)
def test_user_global_panel_norules(self):
"""Test unspecified admin panel without meta-perm in product env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/testcat1/panel2/some/path'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.global_env)
def test_user_global_plugins(self):
"""Plugin admin panel without meta-perm in global env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/general/plugin'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.global_env)
def test_user_product_panel_whitelist(self):
"""Test whitelisted admin panel without meta-perm in product env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/testcat1/panel1/some/path'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.env)
def test_user_product_panel_blacklist(self):
"""Test blacklisted admin panel without meta-perm in product env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/testcat3/panel1/some/path'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.env)
def test_user_product_panel_norules(self):
"""Test unspecified admin panel without meta-perm in product env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/testcat1/panel2/some/path'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.env)
def test_user_product_plugins(self):
"""Plugin admin panel without meta-perm in product env
"""
req = self.req
req.authname = 'testuser'
req.environ['PATH_INFO'] = '/admin/general/plugin'
with self.assertRaises(HTTPNotFound):
self._dispatch(req, self.env)
def test_suite():
    """Aggregate the product admin test cases into a single suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductAdminSetupTestCase, 'test'))
    suite.addTest(unittest.makeSuite(ProductAdminDispatchTestCase, 'test'))
    return suite
if __name__ == '__main__':
    # Run this module's suite when executed directly.
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/admin/__init__.py | bloodhound_multiproduct/tests/admin/__init__.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests import TestLoader
def test_suite():
    """Discover every test module contained in this package."""
    loader = TestLoader()
    return loader.discover_package(__package__, pattern='*.py')
if __name__ == '__main__':
    # Run the discovered suite when executed directly.
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/wiki/wikisyntax.py | bloodhound_multiproduct/tests/wiki/wikisyntax.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for inherited Apache(TM) Bloodhound's wiki syntax
in product environments"""
import os.path
import re
import unittest
from trac.wiki.tests import wikisyntax
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
from tests.wiki import formatter
def test_suite():
    """Re-run Trac's wiki syntax test cases in product environments.

    Each entry pairs a Trac test-case table with its setup function and
    the wiki resource context to render in (``None`` = default context).
    """
    suite = unittest.TestSuite()
    runs = [
        (wikisyntax.TEST_CASES, wikisyntax.wiki_setup, None),
        (wikisyntax.RELATIVE_LINKS_TESTS, wikisyntax.wiki_setup,
         ('wiki', 'Main/Sub')),
        (wikisyntax.SPLIT_PAGE_NAMES_TESTS, wikisyntax.wiki_setup_split,
         ('wiki', 'Main/Sub')),
        (wikisyntax.SCOPED_LINKS_TESTS, wikisyntax.wiki_setup,
         ('wiki', 'FirstLevel/SecondLevel/ThirdLevel')),
    ]
    for cases, setup, context in runs:
        suite.addTest(formatter.test_suite(cases, setup, wikisyntax.__file__,
                                           wikisyntax.wiki_teardown,
                                           context=context))
    return suite
if __name__ == '__main__':
    # Run the wiki syntax suite when executed directly.
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/wiki/macros.py | bloodhound_multiproduct/tests/wiki/macros.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for inherited Apache(TM) Bloodhound's wiki macros
in product environments"""
import os.path
import re
import unittest
from trac.wiki.tests import macros
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
from tests.wiki import formatter
def test_suite():
    """Re-run Trac's wiki macro test cases in product environments.

    Each entry is (test-case table, setup hook, teardown hook); ``None``
    means the formatter's defaults apply.
    """
    suite = unittest.TestSuite()
    runs = [
        (macros.IMAGE_MACRO_TEST_CASES, None, None),
        (macros.TITLEINDEX1_MACRO_TEST_CASES, None, None),
        (macros.TITLEINDEX2_MACRO_TEST_CASES,
         macros.titleindex2_setup, macros.titleindex_teardown),
        (macros.TITLEINDEX3_MACRO_TEST_CASES,
         macros.titleindex3_setup, macros.titleindex_teardown),
        (macros.TITLEINDEX4_MACRO_TEST_CASES,
         macros.titleindex4_setup, macros.titleindex_teardown),
        (macros.TITLEINDEX5_MACRO_TEST_CASES,
         macros.titleindex5_setup, macros.titleindex_teardown),
        (macros.RECENTCHANGES_MACRO_TEST_CASES,
         macros.recentchanges_setup, macros.recentchanges_teardown),
        (macros.TRACINI_MACRO_TEST_CASES,
         macros.tracini_setup, macros.tracini_teardown),
    ]
    for cases, setup, teardown in runs:
        suite.addTest(formatter.test_suite(cases, file=macros.__file__,
                                           setup=setup, teardown=teardown))
    return suite
if __name__ == '__main__':
    # Run the wiki macro suite when executed directly.
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/wiki/model.py | bloodhound_multiproduct/tests/wiki/model.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's ticket reports in product environments"""
import os.path
import shutil
import tempfile
import unittest
from trac.wiki.tests.model import WikiPageTestCase
from multiproduct.env import ProductEnvironment
from tests.env import MultiproductTestCase
class ProductWikiPageTestCase(WikiPageTestCase, MultiproductTestCase):
    """Run Trac's WikiPage model tests against a product environment."""

    def setUp(self):
        # The upstream tests expect an environment backed by a real folder.
        env_path = os.path.join(tempfile.gettempdir(), 'trac-tempenv')
        self.global_env = self._setup_test_env(create_folder=True,
                                               path=env_path)
        self._upgrade_mp(self.global_env)
        self._setup_test_log(self.global_env)
        self._load_product_from_data(self.global_env, self.default_product)
        self.env = ProductEnvironment(self.global_env, self.default_product)

    def tearDown(self):
        # Drop DB contents and the on-disk folder created in setUp().
        self.global_env.reset_db()
        shutil.rmtree(self.global_env.path)
        self.env = self.global_env = None
def test_suite():
    """Wrap the product-scoped wiki model tests in a suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(ProductWikiPageTestCase, 'test'))
    return suite
if __name__ == '__main__':
    # Run the wiki model suite when executed directly.
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/wiki/formatter.py | bloodhound_multiproduct/tests/wiki/formatter.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for Apache(TM) Bloodhound's wiki formatters in product environments"""
import os.path
import re
import unittest
from trac.test import Mock, MockPerm, locale_en
from trac.util.datefmt import utc
from trac.web.href import Href
from trac.web.main import FakeSession
from trac.wiki.tests import formatter
from multiproduct.env import ProductEnvironment
from multiproduct.model import Product
from tests.env import MultiproductTestCase
class ProductWikiTestCase(formatter.WikiTestCase, MultiproductTestCase):
    """Trac wiki formatting test case executed inside a product env.

    The optional ``mpctx`` dict tunes the multi-product setup:

    - ``setup_product``: product prefix used when creating the test env
    - ``load_products``: extra product prefixes loaded from test data
    - ``main_product``: prefix of the env the rendering runs against;
      ``None`` selects the global environment, absence keeps the env
      chosen by ``_prepare_env()``
    """
    # Always show full diffs on assertion failures.
    maxDiff = None
    def setUp(self):
        self._prepare_env()
        self._setup_test_log(self.global_env)
        formatter.WikiTestCase.setUp(self)
        if self.context.req:
            self.context.req.session = FakeSession()
        if self.mpctx:
            # Load any products referenced by the context that were not
            # already created by _prepare_env().
            candidates = set(self.mpctx.get('load_products', []) +
                             [self.mpctx.get('main_product')])
            candidates -= set([self.default_product, None,
                               self.mpctx.get('setup_product')])
            for prefix in candidates:
                self._load_product_from_data(self.env, prefix)
            # NotImplemented is a sentinel for "key absent": keep the env
            # chosen by _prepare_env(); None explicitly selects global env.
            prefix = self.mpctx.get('main_product', NotImplemented)
            if prefix is None:
                self.env = self.global_env
            elif prefix is not NotImplemented \
                    and (self.env is self.global_env or
                         prefix != self.env.product.prefix):
                self.env = ProductEnvironment(self.global_env, prefix)
        # Enable multi-product components
        self.env.config.set('components', 'multiproduct.*', 'enabled')
    def _prepare_env(self):
        # Create the global test env with the wiki test components enabled,
        # then wrap it in a product environment when a prefix is selected.
        all_test_components = [
            formatter.HelloWorldMacro, formatter.DivHelloWorldMacro,
            formatter.TableHelloWorldMacro, formatter.DivCodeMacro,
            formatter.DivCodeElementMacro, formatter.DivCodeStreamMacro,
            formatter.NoneMacro, formatter.WikiProcessorSampleMacro,
            formatter.SampleResolver]
        self.global_env = self._setup_test_env(
            enable=['trac.*', 'multiproduct.*'] + all_test_components
        )
        self._upgrade_mp(self.global_env)
        self._load_product_from_data(self.global_env, self.default_product)
        prefix = self.default_product
        if self.mpctx:
            prefix = self.mpctx.get('setup_product', prefix)
            if prefix and prefix != self.default_product:
                self._load_product_from_data(self.global_env, prefix)
        if prefix:
            self.env = ProductEnvironment(self.global_env,
                                          prefix or self.default_product)
        else:
            self.env = self.global_env
        self.env.path = ''
        config = self.env.config
        # -- intertrac support
        config.set('intertrac', 'trac.title', "Trac's Trac")
        config.set('intertrac', 'trac.url',
                   "http://trac.edgewall.org")
        config.set('intertrac', 't', 'trac')
        config.set('intertrac', 'th.title', "Trac Hacks")
        config.set('intertrac', 'th.url',
                   "http://trac-hacks.org")
        config.set('intertrac', 'th.compat', 'false')
        # -- safe schemes
        config.set('wiki', 'safe_schemes',
                   'file,ftp,http,https,svn,svn+ssh,'
                   'rfc-2396.compatible,rfc-2396+under_score')
        req = Mock(href=Href('/'), abs_href=Href('http://www.example.com/'),
                   authname='anonymous', perm=MockPerm(), tz=utc, args={},
                   locale=locale_en, lc_time=locale_en)
        self.env.href = req.href
        self.env.abs_href = req.abs_href
    def tearDown(self):
        self.global_env.reset_db()
        try:
            if self._teardown:
                self._teardown(self)
        finally:
            # NOTE(review): clears ``_env`` (not ``env``) -- presumably the
            # attribute backing MultiproductTestCase's env property; confirm
            # against that base class.
            self.global_env = self._env = None
    def __init__(self, title, input, correct, file, line, setup=None,
                 teardown=None, context=None, mpctx=None):
        # mpctx must be set before WikiTestCase.__init__ triggers setUp paths
        # that read it.
        MultiproductTestCase.__init__(self, 'test')
        self.mpctx = mpctx
        formatter.WikiTestCase.__init__(self, title, input, correct, file, line,
                                        setup, teardown, context)
class ProductOneLinerTestCase(ProductWikiTestCase):
    # Borrow Trac's one-liner formatter as a plain function (im_func,
    # Python 2) so it runs with this class's product-aware setup.
    formatter = formatter.OneLinerTestCase.formatter.im_func
class ProductEscapeNewLinesTestCase(ProductWikiTestCase):
    # Reuse Trac's escape-newlines options and formatter (im_func unwraps
    # the Python 2 unbound method) under the product-aware setup.
    generate_opts = formatter.EscapeNewLinesTestCase.generate_opts
    formatter = formatter.EscapeNewLinesTestCase.formatter.im_func
class ProductOutlineTestCase(ProductWikiTestCase):
    # Borrow Trac's outline formatter as a plain function (im_func,
    # Python 2) so it runs with this class's product-aware setup.
    formatter = formatter.OutlineTestCase.formatter.im_func
def test_suite(data=None, setup=None, file=formatter.__file__,
teardown=None, context=None, mpctx=None):
suite = unittest.TestSuite()
def add_test_cases(data, filename):
tests = re.compile('^(%s.*)$' % ('=' * 30), re.MULTILINE).split(data)
next_line = 1
line = 0
for title, test in zip(tests[1::2], tests[2::2]):
title = title.lstrip('=').strip()
if line != next_line:
line = next_line
if not test or test == '\n':
continue
next_line += len(test.split('\n')) - 1
if 'SKIP' in title or 'WONTFIX' in title:
continue
blocks = test.split('-' * 30 + '\n')
if len(blocks) < 5:
blocks.extend([None,] * (5 - len(blocks)))
input, page, oneliner, page_escape_nl, outline = blocks[:5]
if page:
page = ProductWikiTestCase(
title, input, page, filename, line, setup,
teardown, context, mpctx)
if oneliner:
oneliner = ProductOneLinerTestCase(
title, input, oneliner[:-1], filename, line, setup,
teardown, context, mpctx)
if page_escape_nl:
page_escape_nl = ProductEscapeNewLinesTestCase(
title, input, page_escape_nl, filename, line, setup,
teardown, context, mpctx)
if outline:
outline = ProductOutlineTestCase(
title, input, outline, filename, line, setup,
teardown, context, mpctx)
for tc in [page, oneliner, page_escape_nl, outline]:
if tc:
suite.addTest(tc)
if data:
add_test_cases(data, file)
else:
for f in ('wiki-tests.txt', 'wikicreole-tests.txt'):
testfile = os.path.join(os.path.split(file)[0], f)
if os.path.exists(testfile):
data = open(testfile, 'r').read().decode('utf-8')
add_test_cases(data, testfile)
else:
print 'no ', testfile
return suite
if __name__ == '__main__':
    # Run the formatting suite when executed directly.
    unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/wiki/__init__.py | bloodhound_multiproduct/tests/wiki/__init__.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from tests import TestLoader
def test_suite():
return TestLoader().discover_package(__package__, pattern='*.py')
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/functional/prefs.py | bloodhound_multiproduct/tests/functional/prefs.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from trac.tests import functional
from trac.tests.functional.tester import tc
#----------------
# Functional test cases for preferences (rewritten)
#----------------
# TODO: These classes are almost a copycat of Trac's. Beware of license header
class TestPreferences(functional.FunctionalTwillTestCaseSetup):
    # Twill keeps hidden browser state, so the tc.* call order matters.
    def runTest(self):
        """Set preferences for admin user"""
        prefs_url = self._tester.url + "/prefs"
        # [BLOODHOUND] Preferences link removed
        tc.follow('/prefs')
        tc.url(prefs_url)
        tc.notfind('Your preferences have been saved.')
        # Submit padded values; the later tc.find() checks confirm they are
        # echoed back trimmed of surrounding whitespace.
        tc.formvalue('userprefs', 'name', ' System Administrator ')
        tc.formvalue('userprefs', 'email', ' admin@example.com ')
        tc.submit()
        tc.find('Your preferences have been saved.')
        tc.follow('Date & Time')
        tc.url(prefs_url + '/datetime')
        tc.formvalue('userprefs', 'tz', 'GMT -10:00')
        tc.submit()
        tc.find('Your preferences have been saved.')
        # Revisit both panels to verify the values were persisted.
        tc.follow('General')
        tc.url(prefs_url)
        tc.notfind('Your preferences have been saved.')
        tc.find('value="System Administrator"')
        tc.find(r'value="admin@example\.com"')
        tc.follow('Date & Time')
        tc.url(prefs_url + '/datetime')
        tc.find('GMT -10:00')
class RegressionTestRev5785(functional.FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of the fix in r5785"""
        # There is no explicit assertion: the regression check is that
        # logging out and back in from the preferences page succeeds
        # without twill raising an error.
        prefs_url = self._tester.url + "/prefs"
        # [BLOODHOUND] Preferences link removed
        tc.follow('/prefs')
        tc.url(prefs_url)
        self._tester.logout()
        self._tester.login('admin')
class RegressionTestTicket5765(functional.FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5765
        Unable to turn off 'Enable access keys' in Preferences
        """
        self._tester.go_to_front()
        # [BLOODHOUND] Preferences link removed
        tc.follow('/prefs')
        tc.follow('Keyboard Shortcuts')
        # Toggle the checkbox on and then off, checking that each submitted
        # state is reflected in the rendered form.
        tc.formvalue('userprefs', 'accesskeys', True)
        tc.submit()
        tc.find('name="accesskeys".*checked="checked"')
        tc.formvalue('userprefs', 'accesskeys', False)
        tc.submit()
        tc.notfind('name="accesskeys".*checked="checked"')
def trac_functionalSuite(suite=None):
    """Add the preference tests (ported from Trac) to *suite*.

    Fixes a latent crash: the parameter advertised a ``None`` default but
    the body unconditionally dereferenced it. A fresh ``TestSuite`` is now
    created when none is given, and the suite is returned so both calling
    styles work (existing callers that pass a suite are unaffected).
    """
    if suite is None:
        import unittest
        suite = unittest.TestSuite()
    suite.addTest(TestPreferences())
    suite.addTest(RegressionTestRev5785())
    suite.addTest(RegressionTestTicket5765())
    return suite
#--------------
# Multiproduct test cases
#--------------
def functionalSuite(suite=None):
    """Return *suite* extended with this module's preference tests.

    When no suite is given, the base functional suite (which boots the
    shared test environment) is created first.
    """
    if not suite:
        import tests.functional
        suite = tests.functional.functionalSuite()
    trac_functionalSuite(suite)
    return suite
if __name__ == '__main__':
    import unittest
    # Run the functional suite when this module is executed directly.
    unittest.main(defaultTest='functionalSuite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/functional/admin.py | bloodhound_multiproduct/tests/functional/admin.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from trac.perm import PermissionSystem
from trac.tests.functional import FunctionalTwillTestCaseSetup, internal_error
from trac.tests.functional.tester import tc
from multiproduct.env import ProductEnvironment
from tests.functional import MultiproductFunctionalTestCase
#----------------
# Functional test cases for admin web UI & CLI
#----------------
class TestAdminProductDefault(MultiproductFunctionalTestCase,
                              FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Admin set default product"""
        prefix, name, owner = self._tester.admin_create_product(owner='admin')
        products_url = self._tester.url + '/admin/ticket/products'
        # Mark the new product as the default and verify the radio sticks.
        tc.go(products_url)
        tc.formvalue('product_table', 'default', prefix)
        tc.submit('apply')
        tc.find('type="radio" name="default" value="%s" checked="checked"'
                % prefix)
        # The default product must be pre-selected on the new ticket form.
        tc.go(self._tester.url + '/newticket')
        tc.find('<option selected="selected" value="%s">%s</option>'
                % (prefix, name))
        # Test the "Clear default" button
        tc.go(products_url)
        tc.submit('clear', 'product_table')
        tc.notfind('type="radio" name="default" value=".+" checked="checked"')
class TestAdminProductRename(MultiproductFunctionalTestCase,
                             FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Rename product from the admin page."""
        prefix, name, owner = self._tester.admin_create_product(owner='admin')
        # Create two tickets inside the product so we can later verify that
        # the rename adds a change comment to each of them.
        with self.in_product(prefix) as (testenv, tester):
            t1 = tester.create_ticket()
            t2 = tester.create_ticket()
        # Random suffix keeps the new name unique across runs.
        new_name = '%s%s' % (name, str(uuid.uuid4()).split('-')[0])
        admin_product_url = self._tester.url + '/admin/ticket/products'
        tc.go(admin_product_url + '/' + prefix)
        tc.formvalue('modprod', 'name', new_name)
        tc.submit('save')
        tc.find("Your changes have been saved")
        tc.find(r'<a href="/admin/ticket/products/%s">%s</a>'
                % (prefix, new_name))
        # Every ticket in the product gets an auto-comment about the rename.
        with self.in_product(prefix) as (testenv, tester):
            tester.go_to_ticket(t1)
            comment = "Product %s renamed to %s" % (name, new_name)
            tc.find(comment)
            tester.go_to_ticket(t2)
            tc.find(comment)
class TestConsoleProductRename(MultiproductFunctionalTestCase,
                               FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Rename product from the console."""
        prefix, name, owner = self._tester.admin_create_product(owner='admin')
        # Create two tickets inside the product so we can later verify that
        # the rename adds a change comment to each of them.
        with self.in_product(prefix) as (testenv, tester):
            t1 = tester.create_ticket()
            t2 = tester.create_ticket()
        new_name = '%s%s' % (name, str(uuid.uuid4()).split('-')[0])
        # Rename via trac-admin instead of the web UI.
        self._testenv._tracadmin('product', 'rename', prefix, new_name)
        with self.in_product(prefix) as (testenv, tester):
            tester.go_to_ticket(t1)
            comment = "Product %s renamed to %s" % (name, new_name)
            tc.find(comment)
            tester.go_to_ticket(t2)
            tc.find(comment)
class RegressionTestBhTicket667(MultiproductFunctionalTestCase,
                                FunctionalTwillTestCaseSetup):
    def runTest(self):
        """User is redirected to the login page when the page they are
        navigating to is forbidden.
        """
        env = self._testenv.get_trac_environment()
        actions = PermissionSystem(env).get_user_permissions('anonymous')
        # Revoke all permissions for 'anonymous'
        self._testenv._tracadmin('permission', 'remove', 'anonymous', *actions)
        self._testenv.restart() # TODO: can be removed when #539 is resolved
        try:
            # An anonymous (not logged-in) visit must redirect to /login
            # instead of rendering an internal error page.
            with self._tester.as_user(None):
                tc.go(self._tester.url)
                tc.notfind(internal_error)
                tc.url(self._tester.url + '/login\?referer=%2F$')
        finally:
            # Restore the permissions so later tests are unaffected.
            self._testenv._tracadmin('permission', 'add', 'anonymous',
                                     *actions)
def functionalSuite(suite=None):
    """Return *suite* extended with the admin web UI / CLI test cases.

    When no suite is given, the base functional suite (which boots the
    shared test environment) is created first.
    """
    if not suite:
        import tests.functional
        suite = tests.functional.functionalSuite()
    for case in (TestAdminProductDefault(), TestAdminProductRename(),
                 TestConsoleProductRename(), RegressionTestBhTicket667()):
        suite.addTest(case)
    return suite
if __name__ == '__main__':
    # Run the functional suite when this module is executed directly.
    unittest.main(defaultTest='functionalSuite')
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | false |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/functional/ticket.py | bloodhound_multiproduct/tests/functional/ticket.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Override a few functional tests for tickets.
"""
from urlparse import urlsplit
from twill.errors import TwillException
from trac.ticket.tests.functional import *
from tests import unittest
from tests.functional import regex_owned_by
#----------------
# Functional test cases for tickets (rewritten)
#----------------
# TODO: These classes are almost a copycat of Trac's. Beware of license header
class TestTicketPreview(FunctionalTwillTestCaseSetup):
    """There is no ticket preview in Bloodhound but, if there were,
    the corresponding Trac test case should be rewritten like this.
    """
    def runTest(self):
        """Preview ticket creation
        """
        # [BLOODHOUND] New Ticket => More fields (in create ticket menu)
        self._tester.go_to_newticket()
        summary = random_sentence(5)
        desc = random_sentence(5)
        tc.formvalue('propertyform', 'field-summary', summary)
        tc.formvalue('propertyform', 'field-description', desc)
        tc.submit('preview')
        # Preview must stay on /newticket without creating the ticket,
        # echoing both entered fields back.
        tc.url(self._tester.url + '/newticket$')
        tc.find('ticket not yet created')
        tc.find(summary)
        tc.find(desc)
class TestTicketNoSummary(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Creating a ticket without summary should fail
        """
        # [BLOODHOUND] New Ticket => More fields (in create ticket menu)
        self._tester.go_to_newticket()
        desc = random_sentence(5)
        tc.formvalue('propertyform', 'field-description', desc)
        # [BLOODHOUND] no actual button to submit /newticket `propertyform`
        tc.submit()
        # The form is re-rendered with the entered description, a
        # validation error, and no ticket created.
        tc.find(desc)
        tc.find('Tickets must contain a summary.')
        # [BLOODHOUND] Create New Ticket => New Ticket
        tc.find('New Ticket')
        tc.find('ticket not yet created')
class TestTicketCustomFieldTextNoFormat(FunctionalTwillTestCaseSetup):
    # Run in the default product context to keep the body agnostic to
    # context switching.
    BH_IN_DEFAULT_PRODUCT = True

    def runTest(self):
        """A custom text field with no explicit format is rendered as
        plain text.
        """
        env = self._testenv.get_trac_environment()
        for option, value in (('newfield', 'text'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', '')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        self._testenv.restart()
        val = "%s %s" % (random_unique_camel(), random_word())
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # [BLOODHOUND] Different markup to render field values
        self._tester.find_ticket_field('newfield', val)
class TestTicketCustomFieldTextAreaNoFormat(FunctionalTwillTestCaseSetup):
    # Run in the default product context to keep the body agnostic to
    # context switching.
    BH_IN_DEFAULT_PRODUCT = True

    def runTest(self):
        """A custom textarea field with no explicit format is rendered as
        plain text.
        """
        env = self._testenv.get_trac_environment()
        for option, value in (('newfield', 'textarea'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', '')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        self._testenv.restart()
        val = "%s %s" % (random_unique_camel(), random_word())
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # [BLOODHOUND] Different markup to render field values
        self._tester.find_ticket_field('newfield', val)
class TestTicketCustomFieldTextWikiFormat(FunctionalTwillTestCaseSetup):
    # Run in the default product context to keep the body agnostic to
    # context switching.
    BH_IN_DEFAULT_PRODUCT = True

    def runTest(self):
        """A custom text field with `wiki` format is run through the wiki
        engine, wiki-links and all.
        Feature added in http://trac.edgewall.org/ticket/1791
        """
        env = self._testenv.get_trac_environment()
        for option, value in (('newfield', 'text'),
                              ('newfield.label', 'Another Custom Field'),
                              ('newfield.format', 'wiki')):
            env.config.set('ticket-custom', option, value)
        env.config.save()
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # The CamelCase word becomes a wiki link; the plain word stays text.
        wiki = '<a [^>]*>%s\??</a> %s' % (word1, word2)
        # [BLOODHOUND] Different markup to render field values
        self._tester.find_ticket_field('newfield', wiki)
class TestTicketCustomFieldTextAreaWikiFormat(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test custom textarea field with `wiki` format.
        Its contents should be rendered through the wiki engine,
        wiki-links and all.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'textarea')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'wiki')
        env.config.save()
        # Restart so tracd picks up the new [ticket-custom] options.
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # Textarea renders as a wiki paragraph, hence the surrounding <p>.
        wiki = '<p>\s*<a [^>]*>%s\??</a> %s<br />\s*</p>' % (word1, word2)
        # [BLOODHOUND] Different markup to render field values
        self._tester.find_ticket_field('newfield', wiki)
class TestTicketCustomFieldTextReferenceFormat(FunctionalTwillTestCaseSetup):
    # Run this test case in default product context to keep body agnostic to
    # context switching
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test custom text field with `reference` format.
        Its contents are treated as a single value
        and are rendered as an auto-query link.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'reference')
        env.config.save()
        # Restart so tracd picks up the new [ticket-custom] options.
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # The whole value becomes one query link; the space is URL-escaped.
        query = 'status=!closed&newfield=%s\+%s' % (word1, word2)
        # Build the expected href relative to the tester's base path.
        path_prefix = urlsplit(self._tester.url).path
        querylink = '<a href="%s/query\?%s">%s</a>' % (path_prefix, query, val)
        # [BLOODHOUND] Different markup to render field values
        self._tester.find_ticket_field('newfield', querylink)
class TestTicketCustomFieldTextListFormat(FunctionalTwillTestCaseSetup):
    # Run this test case in default product context to keep body agnostic to
    # context switching
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test custom text field with `list` format.
        Its contents are treated as a space-separated list of values
        and are rendered as separate auto-query links per word.
        Feature added in http://trac.edgewall.org/ticket/10643
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'Another Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'list')
        env.config.save()
        # Restart so tracd picks up the new [ticket-custom] options.
        self._testenv.restart()
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        ticketid = self._tester.create_ticket(summary=random_sentence(3),
                                              info={'newfield': val})
        self._tester.go_to_ticket(ticketid)
        # Each list item gets its own substring (~) query link.
        query1 = 'status=!closed&newfield=~%s' % word1
        query2 = 'status=!closed&newfield=~%s' % word2
        path_prefix = urlsplit(self._tester.url).path
        querylink1 = '<a href="%s/query\?%s">%s</a>' % (path_prefix,
                                                        query1, word1)
        querylink2 = '<a href="%s/query\?%s">%s</a>' % (path_prefix,
                                                        query2, word2)
        querylinks = '%s %s' % (querylink1, querylink2)
        # [BLOODHOUND] Different markup to render field values
        self._tester.find_ticket_field('newfield', querylinks)
class RegressionTestTicket10828(FunctionalTwillTestCaseSetup):
    # Run this test case in default product context to keep body agnostic to
    # context switching
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/10828
        Rendered property changes should be described as lists of added and
        removed items, even in the presence of comma and semicolon separators.
        """
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'newfield', 'text')
        env.config.set('ticket-custom', 'newfield.label',
                       'A Custom Field')
        env.config.set('ticket-custom', 'newfield.format', 'list')
        env.config.save()
        # Restart so tracd picks up the new [ticket-custom] options.
        self._testenv.restart()
        ticketid = self._tester.create_ticket(summary=random_sentence(3))
        self._tester.go_to_ticket(ticketid)
        # Step 1: set two space-separated values -> both reported as added.
        word1 = random_unique_camel()
        word2 = random_word()
        val = "%s %s" % (word1, word2)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added' % (word1, word2))
        # Step 2: comma/semicolon separators; word1 dropped, word3/4 added.
        word3 = random_unique_camel()
        word4 = random_unique_camel()
        val = "%s, %s; %s" % (word2, word3, word4)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> added; <em>%s</em> removed'
                % (word3, word4, word1))
        # Step 3: clearing the field reports every value as removed.
        tc.formvalue('propertyform', 'field-newfield', '')
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> removed'
                % (word2, word3, word4))
        # Step 4: re-populate and verify the rendered per-item query links.
        val = "%s %s,%s" % (word1, word2, word3)
        tc.formvalue('propertyform', 'field-newfield', val)
        tc.submit('submit')
        tc.find('<em>%s</em> <em>%s</em> <em>%s</em> added'
                % (word1, word2, word3))
        query1 = 'status=!closed&newfield=~%s' % word1
        query2 = 'status=!closed&newfield=~%s' % word2
        query3 = 'status=!closed&newfield=~%s' % word3
        path_prefix = urlsplit(self._tester.url).path
        querylink1 = '<a href="%s/query\?%s">%s</a>' % (path_prefix,
                                                        query1, word1)
        querylink2 = '<a href="%s/query\?%s">%s</a>' % (path_prefix,
                                                        query2, word2)
        querylink3 = '<a href="%s/query\?%s">%s</a>' % (path_prefix,
                                                        query3, word3)
        querylinks = '%s %s, %s' % (querylink1, querylink2, querylink3)
        # [BLOODHOUND] Different markup to render field values
        self._tester.find_ticket_field('newfield', querylinks)
class RegressionTestTicket5394a(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5394 a
        Order user list alphabetically in (re)assign action
        """
        # set restrict_owner config so the owner field becomes a <select/>
        env = self._testenv.get_trac_environment()
        env.config.set('ticket', 'restrict_owner', 'yes')
        env.config.save()
        self._testenv.restart()
        self._tester.go_to_front()
        self._tester.logout()
        test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
                      'zorro']
        # Apparently it takes a sec for the new user to be recognized by the
        # environment.  So we add all the users, then log in as the users
        # in a second loop.  This should be faster than adding a sleep(1)
        # between the .adduser and .login steps.
        for user in test_users:
            self._testenv.adduser(user)
        for user in test_users:
            self._tester.login(user)
            self._tester.logout()
        self._tester.login('admin')
        ticketid = self._tester.create_ticket("regression test 5394a")
        self._tester.go_to_ticket(ticketid)
        # [BLOODHOUND] Workflow <select /> does not end with id attribute
        # Owner options must appear in alphabetical order.
        options = 'id="action_reassign_reassign_owner"[^>]*>' + \
            ''.join(['<option[^>]*>%s</option>' % user for user in
                     sorted(test_users + ['admin', 'user'])])
        tc.find(options, 's')
        # We don't have a good way to fully delete a user from the Trac db.
        # Once we do, we may want to cleanup our list of users here.
class RegressionTestTicket5394b(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5394 b
        Order user list alphabetically on new ticket page
        """
        #FIXME : Test is missing a lot of context. See RegressionTestTicket5394a
        # Must run after RegressionTestTicket5394a
        self._tester.go_to_front()
        # [BLOODHOUND] New Ticket => More fields (in create ticket menu)
        self._tester.go_to_newticket()
        # [BLOODHOUND] Create New Ticket => New Ticket
        tc.find('New Ticket')
        test_users = ['alice', 'bob', 'jane', 'john', 'charlie', 'alan',
                      'zorro']
        # Assert all usernames occur in alphabetical order in the page.
        # A stricter pattern anchored on the owner <select/> used to be
        # built here, but it was dead code (immediately overwritten), so
        # it has been removed.
        options = '.*'.join(sorted(test_users + ['admin', 'user']))
        tc.find(options, 's')
# FIXME: Verbatim copy of its peer just to override regex_owned_by
class RegressionTestTicket5497a(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 a
        Open ticket, component changed, owner not changed"""
        ticketid = self._tester.create_ticket("regression test 5497a")
        self._tester.go_to_ticket(ticketid)
        # Changing only the component must keep the default owner ('user').
        tc.formvalue('propertyform', 'field-component', 'regression5497')
        tc.submit('submit')
        tc.find(regex_owned_by('user'))
# FIXME: Verbatim copy of its peer just to override regex_owned_by
class RegressionTestTicket5497b(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 b
        Open ticket, component changed, owner changed"""
        ticketid = self._tester.create_ticket("regression test 5497b")
        self._tester.go_to_ticket(ticketid)
        # Change component AND explicitly reassign; the explicit owner wins.
        tc.formvalue('propertyform', 'field-component', 'regression5497')
        tc.formvalue('propertyform', 'action', 'reassign')
        tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'admin')
        tc.submit('submit')
        tc.notfind(regex_owned_by('user'))
        tc.find(regex_owned_by('admin'))
# FIXME: Verbatim copy of its peer just to override regex_owned_by
class RegressionTestTicket5497c(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 c
        New ticket, component changed, owner not changed"""
        # Component set at creation time; owner defaults to 'user'.
        ticketid = self._tester.create_ticket("regression test 5497c",
            {'component':'regression5497'})
        self._tester.go_to_ticket(ticketid)
        tc.find(regex_owned_by('user'))
# FIXME: Verbatim copy of its peer just to override regex_owned_by
class RegressionTestTicket5497d(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5497 d
        New ticket, component changed, owner changed"""
        # Explicit owner at creation overrides the component's default owner.
        ticketid = self._tester.create_ticket("regression test 5497d",
            {'component':'regression5497', 'owner':'admin'})
        self._tester.go_to_ticket(ticketid)
        tc.find(regex_owned_by('admin'))
class RegressionTestRev5994(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of the column label fix in r5994"""
        env = self._testenv.get_trac_environment()
        env.config.set('ticket-custom', 'custfield', 'text')
        env.config.set('ticket-custom', 'custfield.label', 'Custom Field')
        env.config.save()
        try:
            self._testenv.restart()
            self._tester.go_to_query()
            self._tester.find_query_column_selector('custfield', 'Custom Field')
        finally:
            # NOTE(review): cleanup of the config change is deliberately
            # disabled below; the custom field is left in place.
            pass
            #env.config.set('ticket', 'restrict_owner', 'no')
            #env.config.save()
            #self._testenv.restart()
class RegressionTestTicket6048(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6048"""
        # Setup the DeleteTicket plugin.  Use context managers so both
        # file handles are closed deterministically (the original leaked
        # them, which can delay the write on some platforms).
        src_path = os.path.join(self._testenv.command_cwd, 'sample-plugins',
                                'workflow', 'DeleteTicket.py')
        with open(src_path) as src:
            plugin = src.read()
        dst_path = os.path.join(self._testenv.tracdir, 'plugins',
                                'DeleteTicket.py')
        with open(dst_path, 'w') as dst:
            dst.write(plugin)
        env = self._testenv.get_trac_environment()
        # [BLOODHOUND] Ensure plugin will be enabled in target scope
        env.config.set('components', 'DeleteTicket.*', 'enabled')
        prevconfig = env.config.get('ticket', 'workflow')
        env.config.set('ticket', 'workflow',
                       prevconfig + ',DeleteTicketActionController')
        env.config.save()
        env = self._testenv.get_trac_environment() # reload environment
        # Create a ticket and delete it
        ticket_id = self._tester.create_ticket(
            summary='RegressionTestTicket6048')
        # (Create a second ticket so that the ticket id does not get reused
        # and confuse the tester object.)
        self._tester.create_ticket(summary='RegressionTestTicket6048b')
        self._tester.go_to_ticket(ticket_id)
        tc.find('delete ticket')
        tc.formvalue('propertyform', 'action', 'delete')
        tc.submit('submit')
        # Deleted ticket must no longer resolve.
        self._tester.go_to_ticket(ticket_id)
        tc.find('Error: Invalid ticket number')
        tc.find('Ticket %s does not exist.' % ticket_id)
        # Remove the DeleteTicket plugin and restore the previous workflow.
        env.config.set('ticket', 'workflow', prevconfig)
        env.config.save()
        env = self._testenv.get_trac_environment() # reload environment
        for ext in ('py', 'pyc', 'pyo'):
            filename = os.path.join(self._testenv.tracdir, 'plugins',
                                    'DeleteTicket.%s' % ext)
            if os.path.exists(filename):
                os.unlink(filename)
class RegressionTestTicket7821group(FunctionalTwillTestCaseSetup):
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/7821 group"""
        env = self._testenv.get_trac_environment()
        # Save the current default query so it can be restored in finally.
        saved_default_query = env.config.get('query', 'default_query')
        default_query = 'status!=closed&order=status&group=status&max=42' \
                        '&desc=1&groupdesc=1&col=summary|status|cc' \
                        '&cc~=$USER'
        env.config.set('query', 'default_query', default_query)
        env.config.save()
        try:
            self._testenv.restart()
            self._tester.create_ticket('RegressionTestTicket7821 group')
            self._tester.go_to_query()
            # $USER must have been substituted with the logged-in user
            tc.find('<input type="text" name="0_cc" value="admin"'
                    ' size="[0-9]+" />')
            # col
            tc.find('<input type="checkbox" name="col" value="summary"'
                    ' checked="checked" />')
            tc.find('<input type="checkbox" name="col" value="owner" />')
            tc.find('<input type="checkbox" name="col" value="status"'
                    ' checked="checked" />')
            tc.find('<input type="checkbox" name="col" value="cc"'
                    ' checked="checked" />')
            # group
            tc.find('<option selected="selected" value="status">Status'
                    '</option>')
            # groupdesc
            tc.find('<input type="checkbox" name="groupdesc" id="groupdesc"'
                    ' checked="checked" />')
            # max
            # [BLOODHOUND] class="input-mini" added (Twitter Bootstrap)
            tc.find('<input type="text" name="max" id="max" size="[0-9]*?"'
                    ' value="42" [^/]*/>')
            # col in results
            tc.find('<a title="Sort by Ticket [(]ascending[)]" ')
            tc.find('<a title="Sort by Summary [(]ascending[)]" ')
            tc.find('<a title="Sort by Status [(]ascending[)]" ')
            tc.find('<a title="Sort by Cc [(]ascending[)]" ')
            tc.notfind('<a title="Sort by Owner "')
        finally:
            # Always restore the original default query.
            env.config.set('query', 'default_query', saved_default_query)
            env.config.save()
            self._testenv.restart()
class RegressionTestTicket8247(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/8247
        Author field of ticket comment corresponding to the milestone removal
        was always 'anonymous'.
        """
        name = "MilestoneRemove"
        self._tester.create_milestone(name)
        # Renamed from `id` to avoid shadowing the builtin.
        ticket_id = self._tester.create_ticket(info={'milestone': name})
        ticket_url = self._tester.url + "/ticket/%d" % ticket_id
        tc.go(ticket_url)
        tc.find(name)
        # Delete the milestone from the admin panel...
        tc.go(self._tester.url + "/admin/ticket/milestones")
        tc.formvalue('milestone_table', 'sel', name)
        tc.submit('remove')
        # ...then verify the auto-generated ticket comment credits 'admin'.
        tc.go(ticket_url)
        # [BLOODHOUND] Ticket comment header changed
        tc.find('<strong class="trac-field-milestone">Milestone</strong>'
                '[ \n\t]*<span>[ \n\t]*<em>%s</em> deleted' % name)
        tc.find('by admin<span>, <a.* ago</a></span>')
        tc.notfind('anonymous')
class TestTimelineTicketDetails(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test ticket details on timeline"""
        env = self._testenv.get_trac_environment()
        env.config.set('timeline', 'ticket_show_details', 'yes')
        env.config.save()
        summary = random_sentence(5)
        ticketid = self._tester.create_ticket(summary)
        self._tester.go_to_ticket(ticketid)
        self._tester.add_comment(ticketid)
        self._tester.go_to_timeline()
        # Enable the "Ticket details" filter in the timeline prefs form.
        tc.formvalue('prefs', 'ticket_details', True)
        tc.submit()
        # Matches any run of HTML tags between the text fragments below.
        htmltags = '(<[^>]*>)*'
        # [BLOODHOUND] Ticket events are different i.e. 'by user' outside <a />
        tc.find(htmltags + 'Ticket ' + htmltags + '#' + str(ticketid) +
                htmltags + ' \\(' + summary + '\\) updated\\s*' +
                htmltags + '\\s+by\\s+' + htmltags + 'admin', 's')
class TestTicketHistoryDiff(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test ticket history (diff)"""
        name = 'TestTicketHistoryDiff'
        ticketid = self._tester.create_ticket(name)
        self._tester.go_to_ticket(ticketid)
        # Editing the description should produce a 'modified (diff)' entry.
        tc.formvalue('propertyform', 'description', random_sentence(6))
        tc.submit('submit')
        # [BLOODHOUND] Description 'modified' in comments feed inside <span />
        tc.find('Description<[^>]*>\\s*<[^>]*>\\s*modified \\(<[^>]*>diff', 's')
        tc.follow('diff')
        tc.find('Changes\\s*between\\s*<[^>]*>Initial Version<[^>]*>\\s*and' \
                '\\s*<[^>]*>Version 1<[^>]*>\\s*of\\s*<[^>]*>Ticket #' , 's')
class RegressionTestTicket5602(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/5602"""
        # Create a set of tickets, and assign them all to a milestone
        milestone = self._tester.create_milestone()
        ids = [self._tester.create_ticket() for x in range(5)]
        [self._tester.ticket_set_milestone(x, milestone) for x in ids]
        # Need a ticket in each state: new, assigned, accepted, closed,
        # reopened
        # leave ids[0] as new
        # make ids[1] be assigned
        self._tester.go_to_ticket(ids[1])
        tc.formvalue('propertyform', 'action', 'reassign')
        tc.formvalue('propertyform', 'action_reassign_reassign_owner', 'admin')
        tc.submit('submit')
        # make ids[2] be accepted
        self._tester.go_to_ticket(ids[2])
        tc.formvalue('propertyform', 'action', 'accept')
        tc.submit('submit')
        # make ids[3] be closed
        self._tester.go_to_ticket(ids[3])
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
        tc.submit('submit')
        # make ids[4] be reopened
        self._tester.go_to_ticket(ids[4])
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
        tc.submit('submit')
        # FIXME: we have to wait a second to avoid "IntegrityError: columns
        # ticket, time, field are not unique"
        time.sleep(1)
        tc.formvalue('propertyform', 'action', 'reopen')
        tc.submit('submit')
        tc.show()
        tc.notfind("Python Traceback")
        # Go to the milestone and follow the links to the closed and active
        # tickets.
        tc.go(self._tester.url + "/roadmap")
        tc.follow(milestone)
        # [BLOODHOUND] closed: labels in milestone progress bar removed
        tc.follow(r"/query\?.*status=closed&.*milestone=%s$" % (milestone,))
        tc.find("Resolution:[ \t\n]+fixed")
        tc.back()
        # [BLOODHOUND] active: labels in milestone progress bar removed
        tc.follow(r"/query\?.*status=new&.*milestone=%s$" % (milestone,))
        # Active view must list every non-closed state and exclude 'closed'.
        tc.find("Status:[ \t\n]+new")
        tc.find("Status:[ \t\n]+assigned")
        tc.find("Status:[ \t\n]+accepted")
        tc.notfind("Status:[ \t\n]+closed")
        tc.find("Status:[ \t\n]+reopened")
class RegressionTestTicket9084(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/9084"""
        ticketid = self._tester.create_ticket()
        self._tester.add_comment(ticketid)
        self._tester.go_to_ticket(ticketid)
        # Open the reply form for comment 1, then submit a reply comment.
        tc.submit('2', formname='reply-to-comment-1') # '1' hidden, '2' submit
        tc.formvalue('propertyform', 'comment', random_sentence(3))
        # [BLOODHOUND] In ticket comments reply form 'Submit changes'=>'Submit'
        tc.submit('Submit')
        tc.notfind('AssertionError')
class RegressionTestTicket6879a(FunctionalTwillTestCaseSetup,
                                unittest.TestCase):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6879 a
        Make sure that previewing a close does not make the available actions
        be those for the close status.
        """
        # create a ticket, then preview resolving the ticket twice
        ticket_id = self._tester.create_ticket("RegressionTestTicket6879 a")
        self._tester.go_to_ticket(ticket_id)
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
        # [BLOODHOUND] No preview button for ticket (comments) in BH theme
        try:
            tc.submit('preview')
        except TwillException:
            # Not a failure: the active theme simply has no preview button.
            self.skipTest('Active theme without ticket preview')
        # Second preview must still offer the 'resolve' action.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('preview')
class RegressionTestTicket6879b(FunctionalTwillTestCaseSetup,
                                unittest.TestCase):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Test for regression of http://trac.edgewall.org/ticket/6879 b
        Make sure that previewing a close does not make the available actions
        be those for the close status.
        """
        # create a ticket, then preview resolving the ticket twice
        ticket_id = self._tester.create_ticket("RegressionTestTicket6879 b")
        self._tester.go_to_ticket(ticket_id)
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.formvalue('propertyform', 'action_resolve_resolve_resolution', 'fixed')
        # [BLOODHOUND] No preview button for ticket (comments) in BH theme
        try:
            tc.submit('preview')
        except TwillException:
            # Not a failure: the active theme simply has no preview button.
            self.skipTest('Active theme without ticket comment preview')
        # After previewing, 'resolve' must still be submittable for real.
        tc.formvalue('propertyform', 'action', 'resolve')
        tc.submit('submit')
class TestAdminPriorityRenumber(FunctionalTwillTestCaseSetup):
    # Run inside the default product context (Bloodhound-specific switch).
    BH_IN_DEFAULT_PRODUCT = True
    def runTest(self):
        """Admin renumber priorities"""
        # [BLOODHOUND] class="input-mini" appended to priorities <select />
        valuesRE = re.compile('<select name="value_([0-9]+)".*>', re.M)
        # NOTE(review): scrapes the browser's *current* page before
        # navigating to the priority admin page — presumably a prior test
        # left a priority listing loaded; confirm ordering dependency.
        html = b.get_html()
        max_priority = max([int(x) for x in valuesRE.findall(html)])
        name = "RenumberPriority"
        self._tester.create_priority(name + '1')
        self._tester.create_priority(name + '2')
        priority_url = self._tester.url + '/admin/ticket/priority'
        tc.go(priority_url)
        tc.url(priority_url + '$')
        tc.find(name + '1')
        tc.find(name + '2')
        # Swap the order numbers of the two newly created priorities.
        tc.formvalue('enumtable', 'value_%s' % (max_priority + 1), str(max_priority + 2))
        tc.formvalue('enumtable', 'value_%s' % (max_priority + 2), str(max_priority + 1))
        tc.submit('apply')
        tc.url(priority_url + '$')
        # Verify that their order has changed.
        tc.find(name + '2.*' + name + '1', 's')
# Ensure that overridden code will be loaded
def trac_functionalSuite(suite=None):
suite.addTest(TestTickets())
# [BLOODHOUND] there's no such thing like ticket preview
#suite.addTest(TestTicketPreview())
suite.addTest(TestTicketNoSummary())
suite.addTest(TestTicketAltFormats())
suite.addTest(TestTicketCSVFormat())
suite.addTest(TestTicketTabFormat())
suite.addTest(TestTicketRSSFormat())
# [BLOODHOUND] TODO: Move to BloodhoundSearch plugin
# suite.addTest(TestTicketSearch())
# suite.addTest(TestNonTicketSearch())
suite.addTest(TestTicketHistory())
suite.addTest(TestTicketHistoryDiff())
suite.addTest(TestTicketQueryLinks())
suite.addTest(TestTicketQueryOrClause())
suite.addTest(TestTicketCustomFieldTextNoFormat())
suite.addTest(TestTicketCustomFieldTextWikiFormat())
suite.addTest(TestTicketCustomFieldTextAreaNoFormat())
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
apache/bloodhound | https://github.com/apache/bloodhound/blob/c3e31294e68af99d4e040e64fbdf52394344df9e/bloodhound_multiproduct/tests/functional/__init__.py | bloodhound_multiproduct/tests/functional/__init__.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import imp
import os
import sys
import time
import urllib2
from inspect import isclass
from subprocess import call, Popen
from trac.tests import functional
from trac.tests.contentgen import (
random_page, random_paragraph, random_sentence, random_unique_camel,
random_word
)
from trac.tests.functional.svntestenv import SvnFunctionalTestEnvironment
from trac.tests.functional.testenv import FunctionalTestEnvironment, ConnectError
from trac.tests.functional.tester import b, FunctionalTester, internal_error, tc
from trac.util.compat import close_fds
from trac.util.text import unicode_quote
from trac.web.href import Href
from multiproduct.api import MultiProductSystem
from multiproduct.env import ProductEnvironment
from multiproduct import hooks
from multiproduct.product_admin import ProductAdminModule
from tests import unittest
#----------------
# Constants
#----------------
from multiproduct.dbcursor import GLOBAL_PRODUCT as GLOBAL_ENV
#----------------
# Product-aware classes for functional tests
#----------------
# TODO: Virtual ABCs for isinstance() checks
class MultiproductFunctionalMixin(object):
"""Mixin class applying multi-product upgrade path upon a given
functional Trac test environment. Access to the global environment
is provided at testing time. In order to obtain a compatible test
environment for a given product @see: `product_test_env` method
@attention: This class must precede functional test environment class in
class declaration because it overrides some methods
"""
@property
def parent(self):
return None
def init(self):
"""Determine the location of Trac source code
"""
self.bh_install_project = 'trac'
self.bhmp_upgrade = False
self.trac_src = os.path.realpath(os.path.join(
__import__('trac', []).__file__, '..' , '..'))
self.bh_src = os.path.realpath(os.path.join(
__import__('multiproduct', []).__file__, '..' , '..', '..'))
self.htdigest = os.path.join(self.dirname, "htdigest")
self.htdigest_realm = 'bloodhound'
print "\nFound Trac source: %s" \
"\nFound Bloodhound source: %s" % (self.trac_src, self.bh_src)
super(MultiproductFunctionalMixin, self).init()
def create(self):
"""Create a new test environment.
This will set up Bloodhound and authentication by invoking installer
script, then call :meth:`create_repo`.
"""
os.mkdir(self.dirname)
self.create_repo()
self._bloodhound_install()
if call([sys.executable,
os.path.join(self.trac_src, 'contrib', 'htpasswd.py'), "-c",
"-b", self.htpasswd, "admin", "admin"], close_fds=close_fds,
cwd=self.command_cwd):
raise Exception('Unable to setup admin password')
self.adduser('user')
self._tracadmin('permission', 'add', 'admin', 'TRAC_ADMIN')
# Setup Trac logging
env = self.get_trac_environment()
env.config.set('logging', 'log_type', 'file')
for component in self.get_enabled_components():
env.config.set('components', component, 'enabled')
env.config.save()
self.post_create(env)
def adduser_htpasswd(self, user):
"""Add a user to the environment. The password will be set
in htpasswd file to the same as username.
"""
return super(MultiproductFunctionalMixin, self).adduser(user)
def adduser_htdigest(self, user):
"""Add a user to the environment. The password will be set
in htdigest file to the same as username.
"""
if call([sys.executable, os.path.join(self.trac_src, 'contrib',
'htdigest.py'), '-b', self.htdigest, self.htdigest_realm,
user, user], close_fds=close_fds, cwd=self.command_cwd):
raise Exception('Unable to setup password for user "%s"' % user)
adduser = adduser_htdigest
def get_env_href(self, user=None, prefix=None, envname=None):
"""Default implementation just returning href object for global
environment and failing if product prefix is specified.
"""
if envname not in (self.bh_install_project, None):
raise LookupError('Unknown environment ' + repr(envname))
if prefix is not None:
self._fail_no_mp_setup()
parts = urllib2.urlparse.urlsplit(self.url)
if not user or user == 'anonymous':
return Href('%s://%s/' % (parts[0], parts[1]))
else:
return Href('%s://%s:%s@%s/' % (parts[0], user, user, parts[1]))
def get_enabled_components(self):
"""Also enable Bloodhound multiproduct plugin.
"""
return super(MultiproductFunctionalMixin, self).get_enabled_components() + \
['multiproduct.*']
def post_create(self, env):
self.getLogger = lambda : env.log
print "Created test environment: %s" % self.dirname
# Setup URL generation for product environments
self.get_env_href = self.configure_web_hooks()
super(MultiproductFunctionalMixin, self).post_create(env)
def _tracadmin(self, *args, **kwargs):
"""Execute trac-admin command in product or (by default) global context
"""
do_wait = kwargs.pop('wait', False)
product_id = kwargs.pop('product', None)
if product_id is not None and product_id != GLOBAL_ENV:
if self.bhmp_upgrade and \
args[0] not in ProductAdminModule.GLOBAL_COMMANDS:
args = ('product', 'admin', product_id) + args
elif not self.bhmp_upgrade:
self._fail_no_mp_setup()
super(MultiproductFunctionalMixin, self)._tracadmin(*args, **kwargs)
if do_wait: # Delay to ensure command executes and caches resets
time.sleep(5)
def _tracd_options(self):
"""List options to run tracd server started for the test run.
"""
return ["--port=%s" % self.port, "-s", "--hostname=127.0.0.1"]
def start(self):
"""Starts the webserver, and waits for it to come up.
Notice: Same as inherited method but without basic auth by default
"""
if 'FIGLEAF' in os.environ:
exe = os.environ['FIGLEAF']
if ' ' in exe: # e.g. 'coverage run'
args = exe.split()
else:
args = [exe]
else:
args = [sys.executable]
options = self._tracd_options()
if 'TRAC_TEST_TRACD_OPTIONS' in os.environ:
options += os.environ['TRAC_TEST_TRACD_OPTIONS'].split()
self.get_trac_environment().log.debug('Starting tracd with args ' +
' '.join(options))
args.append(os.path.join(self.trac_src, 'trac', 'web',
'standalone.py'))
server = Popen(args + options + [self.tracdir],
stdout=functional.logfile, stderr=functional.logfile,
close_fds=close_fds,
cwd=self.command_cwd,
)
self.pid = server.pid
# Verify that the url is ok
timeout = 30
while timeout:
try:
tc.go(self.url)
break
except ConnectError:
time.sleep(1)
timeout -= 1
else:
raise Exception('Timed out waiting for server to start.')
tc.url(self.url)
def restart(self):
"""Restarts the webserver"""
self.stop()
self.start()
# Reload components e.g. those in /plugins folder
from trac.loader import load_components
global_env = self.get_trac_environment()
plugins_dir = global_env.shared_plugins_dir
load_components(global_env, plugins_dir and (plugins_dir,))
def product_testenv(self, product_id):
if product_id == GLOBAL_ENV:
return self.parent or self
else:
return FunctionalProductEnvironment(self, product_id)
def product_environment(self, product_id):
return ProductEnvironment(self.get_trac_environment(), product_id)
def configure_web_hooks(self):
"""Setup web bootstrap_handlers and generation of product and global
base URLs for a given user
:return: a function used to generate base URL for product and
global environments . It will satisfy the following signature
`base_url(user=None, prefix=None, envname=None)` where::
@param user: username used to construct URLs for authenticated
requests
@param prefix: product prefix ; global environment selected
if missing
@param envname: environment name , useful in functional setup
running sibling Trac environments under
parent directory
Generated URLs must be consistent with web hooks configuration
@see: `_configure_web_hooks` method . By default `envname` is ignored
and product base URL will be at /products under URL namespace of the
global environment.
"""
def _default_base_href(user=None, prefix=None, envname=None):
if envname not in (self.bh_install_project, None):
raise LookupError('Unknown environment ' + repr(envname))
# TODO: Does not generate /login ? Should it ?
parts = urllib2.urlparse.urlsplit(self.url)
if not user or user == 'anonymous':
global_href = Href('%s://%s/' % (parts[0], parts[1]))
else:
global_href = Href('%s://%s:%s@%s/' %
(parts[0], user, user, parts[1]))
# FIXME : Check that prefix is None is correct
return global_href if (prefix is None or prefix == GLOBAL_ENV) \
else Href(global_href('products', prefix))
return _default_base_href
# Protected methods
@property
def _bloodhound_install_args(self):
"""Determine arguments supplied in to Bloodhound installer.
"""
return dict(adminuser='admin', adminpass='admin',
dbstring=self.dburi, default_product_prefix='test',
digestfile=self.htdigest, realm=self.htdigest_realm,
repo_type=self.repotype,
repo_path=self.repo_path_for_initenv(),
sourcedir=self.bh_src)
    def _bloodhound_install(self):
        """Execute Bloodhound installer script.

        Runs the installer from inside its source directory (it relies on
        relative paths), with timeline and roadmap navigation enabled so
        functional tests can use them. On success `self.bhmp_upgrade` is
        set to True; the working directory is always restored.
        """
        cwd = os.getcwdu()
        try:
            # Installer expects to run from its own directory
            os.chdir(os.path.join(self.bh_src, 'installer'))
            # NOTE(review): createdigest.py is loaded under the module name
            # 'bloodhound_setup' and then registered as 'createdigest' --
            # looks odd but is presumably required so bloodhound_setup.py
            # can `import createdigest`; confirm before changing.
            create_digest = imp.load_source('bloodhound_setup',
                                            os.path.join(self.bh_src,
                                                         'installer',
                                                         'createdigest.py'))
            sys.modules['createdigest'] = create_digest
            bhsetup = imp.load_source('bloodhound_setup',
                                      os.path.join(self.bh_src, 'installer',
                                                   'bloodhound_setup.py'))
            # Enable timeline and roadmap views; needed in functional tests
            bhsetup.BASE_CONFIG['mainnav'].update({'timeline': 'enabled',
                                                   'roadmap': 'enabled'})
            bhsetup = bhsetup.BloodhoundSetup({'project' : self.bh_install_project,
                                               'envsdir' : self.dirname})
            # Do not perform Bloodhound-specific wiki upgrades
            bhsetup.apply_bhwiki_upgrades = False
            bh_install_args = self._bloodhound_install_args
            bhsetup.setup(**bh_install_args)
        except:
            # Propagate installer failures unchanged; `else` below only
            # marks success
            raise
        else:
            # Multi-product upgrade has been applied by the installer
            self.bhmp_upgrade = True
        finally:
            # Always restore the original working directory
            os.chdir(cwd)
    def _fail_no_mp_setup(self):
        """Guard hook: product-scoped admin commands must not run before
        the multi-product upgrade has been applied; always fail.
        """
        raise EnvironmentError('Product admin executed before upgrade')
def _default_product(self, envname=None):
"""Default product configured for a given environment
@raise LookupError: if no environment matching `envname` can be found
"""
if envname not in ('trac', None):
raise LookupError('Unable to open environment ' + envname)
env = self.get_trac_environment()
return MultiProductSystem(env).default_product_prefix
# TODO: Virtual ABCs for isinstance() checks
# TODO: Assess implications of forwarding methods to global test env
class FunctionalProductEnvironment(object):
    """Functional test environment restricting interactions to a single
    product scope. Unknown attributes are forwarded to the wrapped
    global test environment.
    """
    def __init__(self, testenv, product_id):
        """Wrap `testenv` so that interactions default to `product_id`.

        :param product_id: target product prefix
        :raise LookupError: if there's no product for the given prefix
        """
        self.parent = testenv
        self.prefix = product_id
        self.url = testenv.get_env_href(prefix=product_id)
        # Instantiation fails fast (LookupError) for an unknown product
        ProductEnvironment(testenv.get_trac_environment(), self.prefix)

    def _tracadmin(self, *args, **kwargs):
        """Run trac-admin in this product's context unless a product was
        explicitly requested.
        """
        if kwargs.get('product') is None:
            kwargs['product'] = self.prefix
        return self.parent._tracadmin(*args, **kwargs)

    def get_trac_environment(self):
        """Product-scoped view over the parent's global environment."""
        global_env = self.parent.get_trac_environment()
        return ProductEnvironment(global_env, self.prefix)

    def create(self):
        raise RuntimeError('Bloodhound test environment already created')

    def _bloodhound_install(self):
        raise RuntimeError('Bloodhound test environment already created')

    def __getattr__(self, attrnm):
        # Never forward 'parent' itself: if it is missing, recursing into
        # the parent lookup would loop forever.
        if attrnm == 'parent':
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (self.__class__.__name__, attrnm))
        try:
            return getattr(self.parent, attrnm)
        except AttributeError:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (self.__class__.__name__, attrnm))
# TODO: Virtual ABCs for isinstance() checks
class BasicAuthTestEnvironment(object):
    """Mixin configuring tracd for HTTP basic authentication."""

    def _tracd_options(self):
        """Extend the base tracd options with a --basic-auth switch."""
        options = super(BasicAuthTestEnvironment, self)._tracd_options()
        # Trailing comma: --basic-auth takes base_dir,htpasswd[,realm]
        option = "--basic-auth=%s,%s," % (self.bh_install_project,
                                          self.htpasswd)
        options.append(option)
        return options
# TODO: Virtual ABCs for isinstance() checks
class DigestAuthTestEnvironment(object):
    """Mixin configuring tracd for HTTP digest authentication."""

    def _tracd_options(self):
        """Extend the base tracd options with an --auth switch."""
        options = super(DigestAuthTestEnvironment, self)._tracd_options()
        # --auth takes base_dir,digest_file,realm
        option = "--auth=%s,%s,%s" % (self.bh_install_project,
                                      self.htdigest,
                                      self.htdigest_realm)
        options.append(option)
        return options
class BloodhoundFunctionalTester(FunctionalTester):
    """Leverages Trac library of higher-level operations for interacting with
    a fully featured Apache(TM) Bloodhound test environment.

    Many things have changed in recent versions of Apache(TM) Bloodhound
    user interface once theme and dashboard are both installed:

    - 'New Ticket' link has been phased out in favor of 'More fields' link in
      quick create ticket shortcut menu.
    - New helper method `quick_create_ticket` has been added to create a
      new (random) ticket via quick create ticket shortcut menu.
    - 'logged in as user' label replaced by '<i class="icon-user"></i>user'
    - By using account manager plugin a web form must be submitted to login
    - As a consequence of default hooks new tickets in global scope are
      always bound to default product
    - Timeline module is disabled; frequently used along functional tests
    - View Tickets renamed to Tickets pointing at dashboard
    - Milestones `No date set` label replaced by `Unscheduled`
    - There's no actual button to submit `propertyform` in new ticket page
    - Different markup used to render ticket fields

    Other notable differences not solved by this class (target test cases
    should be rewritten?)

    - Preferences link removed in Bloodhound UI
    - There's no such thing like ticket preview in Bloodhound UI
    - 'Create New Ticket' label in new ticket page replaced by 'New Ticket'
    - Ticket owner label changed from 'Owned by' to 'Assigned to'
    - Source files (*.py) files copied in /plugins folder not enabled ootb
    - Twitter Bootstrap class="input-mini" added in 'Max items per page'
      input control in query view.
    - Ticket comment header changed
    - 'Page PageName created' is not shown anymore for new wiki page
    - Ticket workflow <select /> does not end with `id` attribute
    - Ticket events in timeline are different i.e. 'by user' outside <a />
    - Description 'modified' label in ticket comments feed inside <span />
    - closed: labels in milestone progress reports not shown anymore
    - active: labels in milestone progress reports not shown anymore
    - In ticket comments reply form 'Submit changes' => 'Submit'
    - No preview button for ticket (comments) in BH theme
    - class="input-mini" appended to priorities admin <select />

    As a consequence some methods of Trac functional tester have to be updated.
    """

    def __init__(self, url, skiplogin=False, instance_state=None):
        """Create a :class:`BloodhoundFunctionalTester` for the given
        environment URL and Subversion URL

        :param skiplogin: Skip admin user login
        :param instance_state: mutable mapping shared between tester
                               instances (e.g. global vs product testers)
                               so the global ticket counter stays in sync;
                               a fresh one is created when not supplied
        """
        self.url = url
        self._state = instance_state or dict(ticketcount={})
        # Connect, and login so we can run tests.
        self.go_to_front()
        if not skiplogin:
            self.login('admin')

    @property
    def ticketcount(self):
        """Retrieve ticket count from shared instance state.
        Ticket ID sequence is global.
        """
        # Counters are kept per base URL inside the shared state mapping
        ticketcount_cache = self._state.setdefault('ticketcount', {})
        return ticketcount_cache.get(self.url, 0)

    @ticketcount.setter
    def ticketcount(self, value):
        # Store in shared state so sibling testers observe the update
        ticketcount_cache = self._state.setdefault('ticketcount', {})
        ticketcount_cache[self.url] = value

    def login(self, username):
        """Login as the given user

        Consider that 'logged in as user' label has been replaced by
        '<i class="icon-user"></i>user'.

        Convention throughout the functional tests: password == username.
        """
        #FIXME: Keep/remove this ?
        #tc.add_auth("", self.url, username, username)
        self.go_to_front()
        tc.find("Login")
        tc.follow("Login")
        # Submit user + password via account manager login form
        tc.formvalue('acctmgr_loginform', 'user', username)
        tc.formvalue('acctmgr_loginform', 'password', username)
        tc.submit()
        self.go_to_front()
        tc.find(r'<i class="icon-user"></i>\s*%s' % username)
        tc.find("Logout")
        tc.url(self.url)
        tc.notfind(internal_error)

    def _post_create_ticket(self):
        """Look at the newly created ticket page after creating it
        """
        # we should be looking at the newly created ticket
        tc.url(self.url + '/ticket/%s' % (self.ticketcount + 1))
        # Increment self.ticketcount /after/ we've verified that the ticket
        # was created so a failure does not trigger spurious later
        # failures.
        self.ticketcount += 1

        # verify the ticket creation event shows up in the timeline
        self.go_to_timeline()
        tc.formvalue('prefs', 'ticket', True)
        tc.submit()
        tc.find('Ticket.*#%s.*created' % self.ticketcount)

    def create_ticket(self, summary=None, info=None):
        """Create a new (random) ticket in the test environment. Returns
        the new ticket number.

        :param summary:
            may optionally be set to the desired summary
        :param info:
            may optionally be set to a dictionary of field value pairs for
            populating the ticket. ``info['summary']`` overrides summary.

        `summary` and `description` default to randomly-generated values.
        """
        # [BLOODHOUND] New Ticket => More fields (in create ticket menu)
        self.go_to_newticket()

        tc.notfind(internal_error)
        if summary == None:
            summary = random_sentence(4)
        tc.formvalue('propertyform', 'field_summary', summary)
        tc.formvalue('propertyform', 'field_description', random_page())
        if info:
            for field, value in info.items():
                tc.formvalue('propertyform', 'field_%s' % field, value)
        # [BLOODHOUND] no actual button to submit /newticket `propertyform`
        tc.submit()
        self._post_create_ticket()
        return self.ticketcount

    def create_report(self, title, query, description):
        """Create a new report with the given title, query, and description
        """
        self.go_to_front()
        # [BLOODHOUND] View Tickets renamed to Tickets pointing at dashboard
        tc.follow(r'\bTickets\b')
        tc.notfind(internal_error)
        tc.follow(r'\bReports\b')
        tc.notfind(internal_error)
        tc.formvalue('create_report', 'action', 'new') # select new report form
        tc.submit()
        tc.find('New Report')
        tc.notfind(internal_error)
        tc.formvalue('edit_report', 'title', title)
        tc.formvalue('edit_report', 'description', description)
        tc.formvalue('edit_report', 'query', query)
        tc.submit()
        # NOTE(review): `b` is presumably the module-level twill browser
        # imported from trac.tests.functional -- confirm it is in scope here
        reportnum = b.get_url().split('/')[-1]
        # TODO: verify the url is correct
        # TODO: verify the report number is correct
        # TODO: verify the report does not cause an internal error
        # TODO: verify the title appears on the report list
        return reportnum

    def create_milestone(self, name=None, due=None):
        """Creates the specified milestone, with a random name if none is
        provided.  Returns the name of the milestone.
        """
        if name == None:
            name = random_unique_camel()
        milestone_url = self.url + "/admin/ticket/milestones"
        tc.go(milestone_url)
        tc.url(milestone_url)
        tc.formvalue('addmilestone', 'name', name)
        if due:
            # TODO: How should we deal with differences in date formats?
            tc.formvalue('addmilestone', 'duedate', due)
        tc.submit()
        tc.notfind(internal_error)
        tc.notfind('Milestone .* already exists')
        tc.url(milestone_url)
        tc.find(name)

        # Make sure it's on the roadmap.
        tc.follow('Roadmap')
        tc.url(self.url + "/roadmap")
        tc.find('Milestone:.*%s' % name)
        tc.follow(name)
        tc.url('%s/milestone/%s' % (self.url, unicode_quote(name)))
        if not due:
            # [BLOODHOUND] No date set => Unscheduled
            tc.find('Unscheduled')
        return name

    def go_to_query(self):
        """Surf to the custom query page.
        """
        self.go_to_front()
        # [BLOODHOUND] View Tickets (reports list) => Tickets (dashboard)
        tc.follow('^Tickets$')
        tc.notfind(internal_error)
        tc.url(self.url + '/dashboard')
        tc.follow('Custom Query')
        tc.url(self.url + '/query')

    def quickjump(self, search):
        """Do a quick search to jump to a page."""
        tc.formvalue('mainsearch', 'q', search)
        tc.submit()
        tc.notfind(internal_error)

    # Bloodhound functional tester extensions

    def go_to_newticket(self):
        """Surf to the full new ticket page via the quick create ticket
        menu's 'More fields' link."""
        self.go_to_front()
        tc.follow('More fields')

    def quick_create_ticket(self, summary=None, info=None):
        """Create a new (random) ticket in the test environment via quick
        create ticket shortcut. Returns the new ticket number.

        :param summary:
            may optionally be set to the desired summary
        :param info:
            may optionally be set to a dictionary of field value pairs for
            populating the ticket. Fields are populated afterwards by
            navigating to ticket page, thereby ``info['summary']``overrides
            ``summary``.

        `summary` and `description` default to randomly-generated values.
        """
        self.go_to_front()
        tc.notfind(internal_error)

        if summary == None:
            summary = random_sentence(4)
        tc.formvalue('qct-form', 'field_summary', summary)
        tc.formvalue('qct-form', 'field_description', random_page())
        # NOTE(review): no tc.submit() between filling qct-form and the
        # verification below (compare `create_ticket`) -- confirm the quick
        # create form actually gets submitted, otherwise no ticket exists.
        self._post_create_ticket()
        if info:
            # Second pass to update ticket fields
            # NOTE(review): `_post_create_ticket` already incremented
            # `self.ticketcount`, so `ticketcount + 1` points past the
            # ticket just created -- verify the intended target URL.
            tc.url(self.url + '/ticket/%s' % (self.ticketcount + 1))
            tc.notfind(internal_error)
            for field, value in info.items():
                tc.formvalue('inplace-propertyform', 'field_%s' % field, value)
            tc.submit('submit')
        return self.ticketcount

    @staticmethod
    def regex_ticket_field(fieldname, fieldval):
        """Regex matching a rendered ticket field cell in ticket view."""
        return r'<td [^>]*\bid="vc-%s"[^>]*>\s*%s\s*</td>' % (fieldname, fieldval)

    @staticmethod
    def regex_owned_by(username):
        """Regex matching the 'Assigned to <user>' ticket header markup."""
        return '(Assigned to(<[^>]*>|\\n| )*%s)' % username

    @staticmethod
    def regex_query_column_selector(fieldname, fieldlbl):
        """Regex matching a column selector checkbox in the query view."""
        return r'<label>( |\n)*<input[^<]*value="%s"[^<]*/>' \
               r'( |\n)*<[^<]*>( |\n)*%s( |\n)*</[^<]*>' \
               r'( |\n)*</label>' % (fieldname, fieldlbl)

    def find_ticket_field(self, fieldname, fieldval):
        """Assert that expected value (pattern) matches value in ticket view
        """
        tc.find(self.regex_ticket_field(fieldname, fieldval))

    def find_owned_by(self, username):
        """Assert that a ticket is owned by a given user
        """
        tc.find(self.regex_owned_by(username))

    def find_query_column_selector(self, fieldname, fieldlbl):
        """Assert a column selector is shown in the custom query view."""
        # 's' flag lets the pattern span multiple lines
        tc.find(self.regex_query_column_selector(fieldname, fieldlbl), 's')

    def as_user(self, user, restore='admin'):
        """Context manager to track access to the web site
        as user and restore login afterwards (by default to admin)
        """
        @contextlib.contextmanager
        def user_ctx():
            try:
                login_ok = False
                # Best-effort logout; ignore failure when nobody is logged in
                try:
                    self.logout()
                except:
                    pass
                if user:
                    self.login(user)
                    login_ok = True
                yield self
            finally:
                # Only log out if the login above actually succeeded
                if login_ok:
                    try:
                        self.logout()
                    except:
                        pass
                if restore:
                    self.login(restore)

        return user_ctx()

    class in_product(object):
        """Context manager temporarily switching to product URL
        """
        def __init__(self, tester, url=None):
            # When no URL is given, the tester's default product URL
            # (if any) is used on __enter__
            self.tester = tester
            self.prev_url = None
            self.url = url

        def __enter__(self):
            """Replace tester base URL with default product's URL
            """
            self.prev_url = self.tester.url
            self.tester.url = self.url if self.url else \
                              getattr(self.tester, 'default_product_url',
                                      self.tester.url)
            return self.tester

        def __exit__(self, exc_type, exc_value, traceback):
            """Restore tester URL pointing at global environment
            """
            self.tester.url = self.prev_url

    def create_product(self, prefix=None, name=None, desc=None):
        """Create a product from the product list page."""
        products_url = self.url + '/products'
        tc.go(products_url)
        tc.find('Products')
        tc.submit('add', 'new')
        tc.find('New Product')
        # Random defaults when caller does not care about the values
        prefix = prefix or random_word()
        name = name or random_sentence()
        desc = desc or random_paragraph()
        tc.formvalue('edit', 'prefix', prefix)
        tc.formvalue('edit', 'name', name)
        tc.formvalue('edit', 'description', desc)
        tc.submit()
        tc.find('The product "%s" has been added' % prefix)
        return prefix, name

    def admin_create_product(self, prefix=None, name=None, owner=None):
        """Create a product from the admin page."""
        admin_product_url = self.url + '/admin/ticket/products'
        tc.go(admin_product_url)
        tc.url(admin_product_url + '$')
        # Random defaults when caller does not care about the values
        prefix = prefix or random_word()
        name = name or random_sentence()
        owner = owner or random_word()
        tc.formvalue('addproduct', 'prefix', prefix)
        tc.formvalue('addproduct', 'name', name)
        tc.formvalue('addproduct', 'owner', owner)
        tc.submit()

        tc.find(r'The product "%s" has been added' % prefix)
        tc.find(r'<a href="/admin/ticket/products/%s">%s</a>'
                % (prefix, prefix))
        tc.find(r'<a href="/admin/ticket/products/%s">%s</a>'
                % (prefix, name))
        tc.find(r'<td class="owner">%s</td>' % owner)
        return prefix, name, owner

    def go_to_dashboard(self):
        """Surf to the dashboard page."""
        self.go_to_front()
        tc.follow('Tickets')
        tc.url(self.url + '/dashboard')
class BloodhoundGlobalEnvFunctionalTester(BloodhoundFunctionalTester):
"""Library of higher-level operations for interacting with
a global Apache(TM) Bloodhound test environment enabled with automatic
redirects from global environment to resources in default product.
Many things have changed in recent versions of Apache(TM) Bloodhound
user interface once theme and dashboard are both installed. Beyond common
differences this functional tester also deals with :
- Tickets are created in default product context
- Admin panels for ticket fields are only accessible in default product
context
- Reports are created in default product context
As a consequence some methods of Trac functional tester have to be
executed in special ways.
"""
    def __init__(self, url, *args, **kwargs):
        """Initialize the tester against the global environment URL.

        The superclass constructor performs the initial connection (and
        login, unless skiplogin is passed); `default_product_url` starts
        empty and is assigned by the test environment once the default
        product is known.
        """
        super(BloodhoundGlobalEnvFunctionalTester,
              self).__init__(url, *args, **kwargs)
        # Assigned externally once the default product URL is known
        self.default_product_url = None
class in_product(BloodhoundFunctionalTester.in_product):
"""Context manager temporarily switching to product URL
"""
def __init__(self, tester, url=None):
if url is not None and \
isinstance(tester, BloodhoundGlobalEnvFunctionalTester):
# Create a regular functional tester instance, no redirections
default_product_url = tester.default_product_url
tester = BloodhoundFunctionalTester(tester.url, True,
tester._state)
tester.default_product_url = default_product_url
super(self.__class__, self).__init__(tester, url)
def _post_create_ticket(self):
"""Look at the newly created ticket page after creating it
... but in default product context ...
"""
superobj = super(BloodhoundGlobalEnvFunctionalTester, self)
with self.in_product(self):
return superobj._post_create_ticket()
def create_milestone(self, name=None, due=None):
"""Creates the specified milestone, with a random name if none is
| python | Apache-2.0 | c3e31294e68af99d4e040e64fbdf52394344df9e | 2026-01-05T07:12:43.622011Z | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.